/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe,
                                     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9 - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
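
/*
 * Illustrative sketch (editorial, not driver code): a submission path
 * would seed its WQE from one of the templates above and then fill in
 * only the per-I/O "variable" words called out in the comments. The
 * names "wqe", "xfer_len" and "io_req_tag" below are hypothetical.
 *
 *      memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *      wqe->fcp_iread.total_xfer_len = xfer_len;                // Word 4
 *      bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, io_req_tag); // Word 9
 */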

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
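
/*
 * Editorial note with a worked example: @cnt above is a byte count, so
 * copying one 64-byte WQE (q->entry_size == 64) takes eight 8-byte
 * moves (i = 0, 8, ..., 56):
 *
 *      lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, 64);
 *
 * On configurations other than 64-bit little-endian, the
 * lpfc_sli_pcimem_bcopy() fallback also performs the per-word swap to
 * SLI (little-endian) byte order.
 */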

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is invalid, or -EBUSY if no entries are
 * available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
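
/*
 * Hedged caller sketch (not taken verbatim from the driver): the WQE is
 * built from a template, posted under the queue lock, and -EBUSY from a
 * full ring is handled by deferring rather than dropping the request.
 * "wq", "wqe", "pring" and "iflags" are assumed to be set up by the
 * caller.
 *
 *      spin_lock_irqsave(&pring->ring_lock, iflags);
 *      rc = lpfc_sli4_wq_put(wq, wqe);
 *      spin_unlock_irqrestore(&pring->ring_lock, iflags);
 *      if (rc == -EBUSY)
 *              ... leave the request on the software txq and retry ...
 */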

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     u8 rearm, enum lpfc_poll_mode poll_mode)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
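
/*
 * Worked example of the pacing above (illustrative numbers only): with,
 * say, eq->notify_interval == 16 and eq->max_proc_limit == 256, the loop
 * acknowledges 16 EQEs at a time with a NOARM doorbell write and bails
 * out after 256 so one EQ cannot monopolize the CPU; the final doorbell
 * write at rearm_and_exit releases whatever remainder is still
 * unacknowledged and optionally rearms the EQ.
 */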

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before the valid bit was
         * checked, add a barrier here as well. It may not be strictly
         * needed, as the valid word checked above is a single 32-bit entity.
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
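
/*
 * Illustrative walk-through of the valid-bit scheme (cqav case): on a
 * 4-entry queue that starts with qe_valid == 1, consuming the entries at
 * host_index 0..3 advances the index without touching the CQEs; the wrap
 * back to index 0 flips qe_valid to 0, so entries left over from the
 * previous lap no longer match in lpfc_sli4_cq_get() until the HBA
 * rewrites them with the new phase.
 */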

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The content of the Header Receive Queue Entry.
 * @drqe: The content of the Data Receive Queue Entry.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful; it returns -EBUSY if the queue is full, -EINVAL if the queues
 * are mismatched, or -ENOMEM if a queue is invalid.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
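
/*
 * Hedged usage sketch: header and data RQEs are posted as a pair, each
 * half pointing at one DMA-mapped buffer; "rqb" here stands in for the
 * caller's receive-buffer descriptor.
 *
 *      hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
 *      hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
 *      drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
 *      drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *      if (rc < 0)
 *              ... -EBUSY means the RQ is full; requeue the buffer ...
 */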

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Look up the DID to verify that it is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. It checks if
 * stop_time (ratov from setting rrq active) has been reached;
 * if it has and the send_rrq flag is set then it will call
 * lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);
        }
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq active bit was set for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
                clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
                goto outnl;
        }

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;

        spin_lock_irqsave(&phba->rrq_list_lock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
        set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
        if (empty)
                lpfc_worker_wake_up(phba);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
outnl:
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
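
/*
 * Illustrative RRQ lifecycle (sketch): after an exchange is aborted, the
 * XRI is quarantined so it is not reused against the same DID until
 * R_A_TOV expires.
 *
 *      if (!lpfc_test_rrq_active(phba, ndlp, xritag))
 *              lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
 *      // roughly fc_ratov + 1 seconds later, lpfc_handle_rrq_active()
 *      // either sends the RRQ ELS or simply clears the bitmap entry via
 *      // lpfc_clr_rrq_active().
 */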

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;
        u8 cmnd;

        cmnd = get_job_cmnd(phba, piocbq);

        if (piocbq->cmd_flag & LPFC_IO_FCP) {
                lpfc_cmd = piocbq->io_buf;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->ndlp;
        } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
                if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->ndlp;
        } else {
                ndlp = piocbq->ndlp;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->cmd_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
                    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
                    sglq->state != SGL_XRI_ABORTED) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);

                        /* Check if we can get a reference on ndlp */
                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
                                sglq->ndlp = NULL;

                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        pring = lpfc_phba_elsring(phba);
                        /* Check if TXQ queue needs to be serviced */
                        if (pring && (!list_empty(&pring->txq)))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, wqe);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
                             LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, iocb);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /* Take the hbalock and release the iocb to the iocb pool. */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
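
/*
 * Hedged allocate/release pairing: lpfc_sli_get_iocbq() and
 * lpfc_sli_release_iocbq() take the hbalock internally, so a lock-free
 * caller can follow the sketch below ("build and issue" stands in for
 * real submission code).
 *
 *      struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *      if (!iocbq)
 *              return -ENOMEM;
 *      ... build and issue; if submission fails, return it to the pool:
 *      lpfc_sli_release_iocbq(phba, iocbq);
 */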

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (piocb->cmd_cmpl) {
                        if (piocb->cmd_flag & LPFC_IO_NVME) {
                                lpfc_nvme_cancel_iocb(phba, piocb,
                                                      ulpstatus, ulpWord4);
                        } else {
                                if (phba->sli_rev == LPFC_SLI_REV4) {
                                        bf_set(lpfc_wcqe_c_status,
                                               &piocb->wcqe_cmpl, ulpstatus);
                                        piocb->wcqe_cmpl.parameter = ulpWord4;
                                } else {
                                        piocb->iocb.ulpStatus = ulpstatus;
                                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                                }
                                (piocb->cmd_cmpl) (phba, piocb, piocb);
                        }
                } else {
                        lpfc_sli_release_iocbq(phba, piocb);
                }
        }
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
        case CMD_SEND_FRAME:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}
1706
1707/**
1708 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1709 * @phba: Pointer to HBA context object.
1710 * @pring: Pointer to driver SLI ring object.
1711 * @piocb: Pointer to the driver iocb object.
1712 *
1713 * The driver calls this function with the hbalock held for SLI3 ports or
1714 * the ring lock held for SLI4 ports. The function adds the
1715 * new iocb to txcmplq of the given ring. This function always returns
1716 * 0. If this function is called for ELS ring, this function checks if
1717 * there is a vport associated with the ELS command. This function also
1718 * starts els_tmofunc timer if this is an ELS command.
1719 **/
1720static int
1721lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1722 struct lpfc_iocbq *piocb)
1723{
1724 u32 ulp_command = 0;
1725
1726 BUG_ON(!piocb);
1727 ulp_command = get_job_cmnd(phba, piocb);
1728
1729 list_add_tail(&piocb->list, &pring->txcmplq);
1730 piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1731 pring->txcmplq_cnt++;
1732 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1733 (ulp_command != CMD_ABORT_XRI_WQE) &&
1734 (ulp_command != CMD_ABORT_XRI_CN) &&
1735 (ulp_command != CMD_CLOSE_XRI_CN)) {
1736 BUG_ON(!piocb->vport);
1737 if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
1738 mod_timer(&piocb->vport->els_tmofunc,
1739 jiffies +
1740 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1741 }
1742
1743 return 0;
1744}
1745
1746/**
1747 * lpfc_sli_ringtx_get - Get first element of the txq
1748 * @phba: Pointer to HBA context object.
1749 * @pring: Pointer to driver SLI ring object.
1750 *
1751 * This function is called with hbalock held to get next
1752 * iocb in txq of the given ring. If there is any iocb in
1753 * the txq, the function returns first iocb in the list after
1754 * removing the iocb from the list, else it returns NULL.
1755 **/
1756struct lpfc_iocbq *
1757lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1758{
1759 struct lpfc_iocbq *cmd_iocb;
1760
1761 lockdep_assert_held(&phba->hbalock);
1762
1763 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1764 return cmd_iocb;
1765}
1766
1767/**
1768 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1769 * @phba: Pointer to HBA context object.
1770 * @cmdiocb: Pointer to driver command iocb object.
1771 * @rspiocb: Pointer to driver response iocb object.
1772 *
1773 * This routine will inform the driver of any BW adjustments we need
1774 * to make. These changes will be picked up during the next CMF
1775 * timer interrupt. In addition, any BW changes will be logged
1776 * with LOG_CGN_MGMT.
1777 **/
1778static void
1779lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1780 struct lpfc_iocbq *rspiocb)
1781{
1782 union lpfc_wqe128 *wqe;
1783 uint32_t status, info;
1784 struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1785 uint64_t bw, bwdif, slop;
1786 uint64_t pcent, bwpcent;
1787 int asig, afpin, sigcnt, fpincnt;
1788 int wsigmax, wfpinmax, cg, tdp;
1789 char *s;
1790
1791 /* First check for error */
1792 status = bf_get(lpfc_wcqe_c_status, wcqe);
1793 if (status) {
1794 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1795 "6211 CMF_SYNC_WQE Error "
1796 "req_tag x%x status x%x hwstatus x%x "
1797 "tdatap x%x parm x%x\n",
1798 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1799 bf_get(lpfc_wcqe_c_status, wcqe),
1800 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1801 wcqe->total_data_placed,
1802 wcqe->parameter);
1803 goto out;
1804 }
1805
1806 /* Gather congestion information on a successful cmpl */
1807 info = wcqe->parameter;
1808 phba->cmf_active_info = info;
1809
1810 /* See if firmware info count is valid or has changed */
1811 if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1812 info = 0;
1813 else
1814 phba->cmf_info_per_interval = info;
1815
1816 tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1817 cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1818
1819 /* Get BW requirement from firmware */
1820 bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1821 if (!bw) {
1822 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1823 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1824 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1825 goto out;
1826 }
1827
1828 /* Gather information needed for logging if a BW change is required */
1829 wqe = &cmdiocb->wqe;
1830 asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1831 afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1832 fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1833 sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
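 /* These congestion fields are read back out of the CMF_SYNC_WQE the
  * driver issued (see lpfc_issue_cmf_sync_wqe()); they are used below
  * only to select the appropriate log message.
  */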
 if (phba->cmf_max_bytes_per_interval != bw ||
     (asig || afpin || sigcnt || fpincnt)) {
  /* Are we increasing or decreasing BW */
  if (phba->cmf_max_bytes_per_interval < bw) {
   bwdif = bw - phba->cmf_max_bytes_per_interval;
   s = "Increase";
  } else {
   bwdif = phba->cmf_max_bytes_per_interval - bw;
   s = "Decrease";
  }

  /* What is the change percentage */
  slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
  pcent = div64_u64(bwdif * 100 + slop,
    phba->cmf_link_byte_count);
  bwpcent = div64_u64(bw * 100 + slop,
    phba->cmf_link_byte_count);
  /* Because of bytes adjustment due to shorter timer in
   * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and
   * may seem like BW is above 100%.
   */
  if (bwpcent > 100)
   bwpcent = 100;

  if (phba->cmf_max_bytes_per_interval < bw &&
      bwpcent > 95)
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6208 Congestion bandwidth "
     "limits removed\n");
  else if ((phba->cmf_max_bytes_per_interval > bw) &&
    ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6209 Congestion bandwidth "
     "limits in effect\n");

  if (asig) {
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6237 BW Threshold %lld%% (%lld): "
     "%lld%% %s: Signal Alarm: cg:%d "
     "Info:%u\n",
     bwpcent, bw, pcent, s, cg,
     phba->cmf_active_info);
  } else if (afpin) {
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6238 BW Threshold %lld%% (%lld): "
     "%lld%% %s: FPIN Alarm: cg:%d "
     "Info:%u\n",
     bwpcent, bw, pcent, s, cg,
     phba->cmf_active_info);
  } else if (sigcnt) {
   wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6239 BW Threshold %lld%% (%lld): "
     "%lld%% %s: Signal Warning: "
     "Cnt %d Max %d: cg:%d Info:%u\n",
     bwpcent, bw, pcent, s, sigcnt,
     wsigmax, cg, phba->cmf_active_info);
  } else if (fpincnt) {
   wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6240 BW Threshold %lld%% (%lld): "
     "%lld%% %s: FPIN Warning: "
     "Cnt %d Max %d: cg:%d Info:%u\n",
     bwpcent, bw, pcent, s, fpincnt,
     wfpinmax, cg, phba->cmf_active_info);
  } else {
   lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
     "6241 BW Threshold %lld%% (%lld): "
     "CMF %lld%% %s: cg:%d Info:%u\n",
     bwpcent, bw, pcent, s, cg,
     phba->cmf_active_info);
  }
 } else if (info) {
  lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
    "6246 Info Threshold %u\n", info);
 }

 /* Save BW change to be picked up during next timer interrupt */
 phba->cmf_last_sync_bw = bw;
out:
 lpfc_sli_release_iocbq(phba, cmdiocb);
}

/**
 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
 * @phba: Pointer to HBA context object.
 * @ms: ms to set in WQE interval, 0 means use init op
 * @total: Total rcv bytes for this interval
 *
 * This routine is called every CMF timer interrupt. Its purpose is
 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
 * that may indicate we have congestion (FPINs or Signals). Upon
 * completion, the firmware will indicate any BW restrictions the
 * driver may need to take.
 **/
int
lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
{
 union lpfc_wqe128 *wqe;
 struct lpfc_iocbq *sync_buf;
 unsigned long iflags;
 u32 ret_val;
 u32 atot, wtot, max;
 u8 warn_sync_period = 0;

 /* First address any alarm / warning activity */
 atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
 wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
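 /* Note that atomic_xchg() consumes the counts: they are zeroed here
  * even if this routine returns early below, so each signal is
  * reported at most once.
  */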

 /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
 if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
     phba->link_state == LPFC_LINK_DOWN)
  return 0;

 spin_lock_irqsave(&phba->hbalock, iflags);
 sync_buf = __lpfc_sli_get_iocbq(phba);
 if (!sync_buf) {
  lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
    "6244 No available WQEs for CMF_SYNC_WQE\n");
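  /* Note: ENOMEM is returned as a positive value here */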
  ret_val = ENOMEM;
  goto out_unlock;
 }

 wqe = &sync_buf->wqe;

 /* WQEs are reused. Clear stale data and set key fields to zero */
 memset(wqe, 0, sizeof(*wqe));

 /* If this is the very first CMF_SYNC_WQE, issue an init operation */
 if (!ms) {
  lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
    "6441 CMF Init %d - CMF_SYNC_WQE\n",
    phba->fc_eventTag);
  bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
  bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
  goto initpath;
 }

 bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
 bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);

 /* Check for alarms / warnings */
 if (atot) {
  if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
   /* We hit a Signal alarm condition */
   bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
  } else {
   /* We hit a FPIN alarm condition */
   bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
  }
 } else if (wtot) {
  if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
      phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
   /* We hit a Signal warning condition */
   max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
    lpfc_acqe_cgn_frequency;
   bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
   bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
   warn_sync_period = lpfc_acqe_cgn_frequency;
  } else {
   /* We hit a FPIN warning condition */
   bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
   bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
   if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
    warn_sync_period =
    LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
  }
 }

 /* Update total read blocks during previous timer interval */
 wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);

initpath:
 bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
 wqe->cmf_sync.event_tag = phba->fc_eventTag;
 bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);

 /* Setup reqtag to match the wqe completion. */
 bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);

 bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
 bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);

 bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
 bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
 bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);

 sync_buf->vport = phba->pport;
 sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
 sync_buf->cmd_dmabuf = NULL;
 sync_buf->rsp_dmabuf = NULL;
 sync_buf->bpl_dmabuf = NULL;
 sync_buf->sli4_xritag = NO_XRI;

 sync_buf->cmd_flag |= LPFC_IO_CMF;
 ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
 if (ret_val) {
  lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
    "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
    ret_val);
  __lpfc_sli_release_iocbq(phba, sync_buf);
 }
out_unlock:
 spin_unlock_irqrestore(&phba->hbalock, iflags);
 return ret_val;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with the hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

 lockdep_assert_held(&phba->hbalock);

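 /* Advance the next command index, wrapping at the end of the ring.
  * The ring is full when the next put index would catch up with the
  * port's get index (re-read from the port below before giving up).
  */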
 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
     (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
  pring->sli.sli3.next_cmdidx = 0;

 if (unlikely(pring->sli.sli3.local_getidx ==
       pring->sli.sli3.next_cmdidx)) {

  pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

  if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
   lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
     "0315 Ring %d issue: portCmdGet %d "
     "is bigger than cmd ring %d\n",
     pring->ringno,
     pring->sli.sli3.local_getidx,
     max_cmd_idx);

   phba->link_state = LPFC_HBA_ERROR;
   /*
    * All error attention handlers are posted to
    * worker thread
    */
   phba->work_ha |= HA_ERATT;
   phba->work_hs = HS_FFER3;

   lpfc_worker_wake_up(phba);

   return NULL;
  }

  if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
   return NULL;
 }

 return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
 struct lpfc_iocbq **new_arr;
 struct lpfc_iocbq **old_arr;
 size_t new_len;
 struct lpfc_sli *psli = &phba->sli;
 uint16_t iotag;

 spin_lock_irq(&phba->hbalock);
 iotag = psli->last_iotag;
 if (++iotag < psli->iocbq_lookup_len) {
  psli->last_iotag = iotag;
  psli->iocbq_lookup[iotag] = iocbq;
  spin_unlock_irq(&phba->hbalock);
  iocbq->iotag = iotag;
  return iotag;
 } else if (psli->iocbq_lookup_len < (0xffff
      - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
  new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
  spin_unlock_irq(&phba->hbalock);
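  /* The lock is dropped for the GFP_KERNEL allocation; the lookup
   * table state is revalidated after the lock is retaken below.
   */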
  new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
      GFP_KERNEL);
  if (new_arr) {
   spin_lock_irq(&phba->hbalock);
   old_arr = psli->iocbq_lookup;
   if (new_len <= psli->iocbq_lookup_len) {
    /* highly improbable case */
    kfree(new_arr);
    iotag = psli->last_iotag;
    if (++iotag < psli->iocbq_lookup_len) {
     psli->last_iotag = iotag;
     psli->iocbq_lookup[iotag] = iocbq;
     spin_unlock_irq(&phba->hbalock);
     iocbq->iotag = iotag;
     return iotag;
    }
    spin_unlock_irq(&phba->hbalock);
    return 0;
   }
   if (psli->iocbq_lookup)
    memcpy(new_arr, old_arr,
           ((psli->last_iotag + 1) *
            sizeof(struct lpfc_iocbq *)));
   psli->iocbq_lookup = new_arr;
   psli->iocbq_lookup_len = new_len;
   psli->last_iotag = iotag;
   psli->iocbq_lookup[iotag] = iocbq;
   spin_unlock_irq(&phba->hbalock);
   iocbq->iotag = iotag;
   kfree(old_arr);
   return iotag;
  }
 } else
  spin_unlock_irq(&phba->hbalock);

 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
   "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
   psli->last_iotag);

 return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to the ring iocb slot and updates the
 * ring pointers. It adds the new iocb to the txcmplq if there is
 * a completion callback for this iocb, else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
 /*
  * Set up an iotag
  */
 nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;

 if (pring->ringno == LPFC_ELS_RING) {
  lpfc_debugfs_slow_ring_trc(phba,
   "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
   *(((uint32_t *) &nextiocb->iocb) + 4),
   *(((uint32_t *) &nextiocb->iocb) + 6),
   *(((uint32_t *) &nextiocb->iocb) + 7));
 }

 /*
  * Issue iocb command to adapter
  */
 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
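 /* Make sure the IOCB is completely written to the ring before the
  * cmdPutInx update below makes it visible to the HBA.
  */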
 wmb();
 pring->stats.iocb_cmd++;

 /*
  * If there is no completion routine to call, we can release the
  * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
  * that have no rsp ring completion, cmd_cmpl MUST be NULL.
  */
 if (nextiocb->cmd_cmpl)
  lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
 else
  __lpfc_sli_release_iocbq(phba, nextiocb);

 /*
  * Let the HBA know what IOCB slot will be the next one the
  * driver will put a command into.
  */
 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and
 * requests an interrupt when there is space available in the ring. This
 * function is called when the driver is unable to post more iocbs to the
 * ring due to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
 int ringno = pring->ringno;

 pring->flag |= LPFC_CALL_RING_AVAILABLE;

 wmb();

 /*
  * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
  * The HBA will tell us when an IOCB entry is available.
  */
 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
 readl(phba->CAregaddr); /* flush */

 pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform the HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
 int ringno = pring->ringno;

 /*
  * Tell the HBA that there is work to do in this ring.
  */
 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
  wmb();
  writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
  readl(phba->CAregaddr); /* flush */
 }
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with the hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
 IOCB_t *iocb;
 struct lpfc_iocbq *nextiocb;

 lockdep_assert_held(&phba->hbalock);

 /*
  * Check to see if:
  *  (a) there is anything on the txq to send
  *  (b) link is up
  *  (c) link attention events can be processed (fcp ring only)
  *  (d) IOCB processing is not blocked by the outstanding mbox command.
  */

 if (lpfc_is_link_up(phba) &&
     (!list_empty(&pring->txq)) &&
     (pring->ringno != LPFC_FCP_RING ||
      phba->sli.sli_flag & LPFC_PROCESS_LA)) {

  while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
         (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
   lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

  if (iocb)
   lpfc_sli_update_ring(phba, pring);
  else
   lpfc_sli_update_full_ring(phba, pring);
 }

 return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with the hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry, else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
 struct hbq_s *hbqp = &phba->hbqs[hbqno];

 lockdep_assert_held(&phba->hbalock);

 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
     ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
  hbqp->next_hbqPutIdx = 0;

 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
  uint32_t raw_index = phba->hbq_get[hbqno];
  uint32_t getidx = le32_to_cpu(raw_index);

  hbqp->local_hbqGetIdx = getidx;

  if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
   lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
     "1802 HBQ %d: local_hbqGetIdx "
     "%u is > than hbqp->entry_count %u\n",
     hbqno, hbqp->local_hbqGetIdx,
     hbqp->entry_count);

   phba->link_state = LPFC_HBA_ERROR;
   return NULL;
  }

  if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
   return NULL;
 }

 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
   hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
 struct hbq_dmabuf *hbq_buf;
 unsigned long flags;
 int i, hbq_count;

 hbq_count = lpfc_sli_hbq_count();
 /* Return all memory used by all HBQs */
 spin_lock_irqsave(&phba->hbalock, flags);
 for (i = 0; i < hbq_count; ++i) {
  list_for_each_entry_safe(dmabuf, next_dmabuf,
    &phba->hbqs[i].hbq_buffer_list, list) {
   hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
   list_del(&hbq_buf->dbuf.list);
   (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
  }
  phba->hbqs[i].buffer_count = 0;
 }

 /* Mark the HBQs not in use */
 phba->hbq_in_use = 0;
 spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns
 * zero if it successfully posts the buffer, else it returns an
 * error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
    struct hbq_dmabuf *hbq_buf)
{
 lockdep_assert_held(&phba->hbalock);
 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
       struct hbq_dmabuf *hbq_buf)
{
 struct lpfc_hbq_entry *hbqe;
 dma_addr_t physaddr = hbq_buf->dbuf.phys;

 lockdep_assert_held(&phba->hbalock);
 /* Get next HBQ entry slot to use */
 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
 if (hbqe) {
  struct hbq_s *hbqp = &phba->hbqs[hbqno];

  hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
  hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
  hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
  hbqe->bde.tus.f.bdeFlags = 0;
  hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
  hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
  /* Sync SLIM */
  hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
  writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
  /* flush */
  readl(phba->hbq_put + hbqno);
  list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
  return 0;
 } else
  return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
       struct hbq_dmabuf *hbq_buf)
{
 int rc;
 struct lpfc_rqe hrqe;
 struct lpfc_rqe drqe;
 struct lpfc_queue *hrq;
 struct lpfc_queue *drq;

 if (hbqno != LPFC_ELS_HBQ)
  return 1;
 hrq = phba->sli4_hba.hdr_rq;
 drq = phba->sli4_hba.dat_rq;

 lockdep_assert_held(&phba->hbalock);
 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 if (rc < 0)
  return rc;
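 /* The buffer tag packs the RQE index returned by lpfc_sli4_rq_put()
  * in the low 16 bits and the HBQ number in the high 16 bits; see the
  * decode in lpfc_sli_hbqbuf_find().
  */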
 hbq_buf->tag = (rc | (hbqno << 16));
 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
 return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
 .rn = 1,
 .entry_count = 256,
 .mask_count = 0,
 .profile = 0,
 .ring_mask = (1 << LPFC_ELS_RING),
 .buffer_count = 0,
 .init_count = 40,
 .add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
 &lpfc_els_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
 uint32_t i, posted = 0;
 unsigned long flags;
 struct hbq_dmabuf *hbq_buffer;
 LIST_HEAD(hbq_buf_list);

 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
  return 0;

 if ((phba->hbqs[hbqno].buffer_count + count) >
     lpfc_hbq_defs[hbqno]->entry_count)
  count = lpfc_hbq_defs[hbqno]->entry_count -
    phba->hbqs[hbqno].buffer_count;
 if (!count)
  return 0;
 /* Allocate HBQ entries */
 for (i = 0; i < count; i++) {
  hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
  if (!hbq_buffer)
   break;
  list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
 }
 /* Check whether HBQ is still in use */
 spin_lock_irqsave(&phba->hbalock, flags);
 if (!phba->hbq_in_use)
  goto err;
 while (!list_empty(&hbq_buf_list)) {
  list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
     dbuf.list);
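  /* Tag the buffer: current buffer count in the low 16 bits, HBQ
   * number in the high 16 bits.
   */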
  hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
       (hbqno << 16));
  if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
   phba->hbqs[hbqno].buffer_count++;
   posted++;
  } else
   (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
 }
 spin_unlock_irqrestore(&phba->hbalock, flags);
 return posted;
err:
 spin_unlock_irqrestore(&phba->hbalock, flags);
 while (!list_empty(&hbq_buf_list)) {
  list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
     dbuf.list);
  (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
 }
 return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ
 * entries successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
 if (phba->sli_rev == LPFC_SLI_REV4)
  return 0;
 else
  return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
      lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from the SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
 if (phba->sli_rev == LPFC_SLI_REV4)
  return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
     lpfc_hbq_defs[qno]->entry_count);
 else
  return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
     lpfc_hbq_defs[qno]->init_count);
}

/*
 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
 struct lpfc_dmabuf *d_buf;

 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
 if (!d_buf)
  return NULL;
 return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
 struct lpfc_dmabuf *h_buf;
 struct lpfc_rqb *rqbp;

 rqbp = hrq->rqbp;
 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
    struct lpfc_dmabuf, list);
 if (!h_buf)
  return NULL;
 rqbp->buffer_count--;
 return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
 struct lpfc_dmabuf *d_buf;
 struct hbq_dmabuf *hbq_buf;
 uint32_t hbqno;

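 /* The high 16 bits of the tag select the HBQ; the low 16 bits
  * identify the buffer posted to it.
  */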
 hbqno = tag >> 16;
 if (hbqno >= LPFC_MAX_HBQS)
  return NULL;

 spin_lock_irq(&phba->hbalock);
 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
  hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
  if (hbq_buf->tag == tag) {
   spin_unlock_irq(&phba->hbalock);
   return hbq_buf;
  }
 }
 spin_unlock_irq(&phba->hbalock);
 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
   "1803 Bad hbq tag. Data: x%x x%x\n",
   tag, phba->hbqs[tag >> 16].buffer_count);
 return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
 uint32_t hbqno;

 if (hbq_buffer) {
  hbqno = hbq_buffer->tag >> 16;
  if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
   (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
 }
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
 uint8_t ret;

 switch (mbxCommand) {
 case MBX_LOAD_SM:
 case MBX_READ_NV:
 case MBX_WRITE_NV:
 case MBX_WRITE_VPARMS:
 case MBX_RUN_BIU_DIAG:
 case MBX_INIT_LINK:
 case MBX_DOWN_LINK:
 case MBX_CONFIG_LINK:
 case MBX_CONFIG_RING:
 case MBX_RESET_RING:
 case MBX_READ_CONFIG:
 case MBX_READ_RCONFIG:
 case MBX_READ_SPARM:
 case MBX_READ_STATUS:
 case MBX_READ_RPI:
 case MBX_READ_XRI:
 case MBX_READ_REV:
 case MBX_READ_LNK_STAT:
 case MBX_REG_LOGIN:
 case MBX_UNREG_LOGIN:
 case MBX_CLEAR_LA:
 case MBX_DUMP_MEMORY:
 case MBX_DUMP_CONTEXT:
 case MBX_RUN_DIAGS:
 case MBX_RESTART:
 case MBX_UPDATE_CFG:
 case MBX_DOWN_LOAD:
 case MBX_DEL_LD_ENTRY:
 case MBX_RUN_PROGRAM:
 case MBX_SET_MASK:
 case MBX_SET_VARIABLE:
 case MBX_UNREG_D_ID:
 case MBX_KILL_BOARD:
 case MBX_CONFIG_FARP:
 case MBX_BEACON:
 case MBX_LOAD_AREA:
 case MBX_RUN_BIU_DIAG64:
 case MBX_CONFIG_PORT:
 case MBX_READ_SPARM64:
 case MBX_READ_RPI64:
 case MBX_REG_LOGIN64:
 case MBX_READ_TOPOLOGY:
 case MBX_WRITE_WWN:
 case MBX_SET_DEBUG:
 case MBX_LOAD_EXP_ROM:
 case MBX_ASYNCEVT_ENABLE:
 case MBX_REG_VPI:
 case MBX_UNREG_VPI:
 case MBX_HEARTBEAT:
 case MBX_PORT_CAPABILITIES:
 case MBX_PORT_IOV_CONTROL:
 case MBX_SLI4_CONFIG:
 case MBX_SLI4_REQ_FTRS:
 case MBX_REG_FCFI:
 case MBX_UNREG_FCFI:
 case MBX_REG_VFI:
 case MBX_UNREG_VFI:
 case MBX_INIT_VPI:
 case MBX_INIT_VFI:
 case MBX_RESUME_RPI:
 case MBX_READ_EVENT_LOG_STATUS:
 case MBX_READ_EVENT_LOG:
 case MBX_SECURITY_MGMT:
 case MBX_AUTH_PORT:
 case MBX_ACCESS_VDATA:
  ret = mbxCommand;
  break;
 default:
  ret = MBX_SHUTDOWN;
  break;
 }
 return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
 unsigned long drvr_flag;
 struct completion *pmbox_done;

 /*
  * If pmbox_done is empty, the driver thread gave up waiting and
  * continued running.
  */
 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
 spin_lock_irqsave(&phba->hbalock, drvr_flag);
 pmbox_done = pmboxq->ctx_u.mbox_wait;
 if (pmbox_done)
  complete(pmbox_done);
 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 return;
}

static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
 unsigned long iflags;

 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
  lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
  spin_lock_irqsave(&ndlp->lock, iflags);
  ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
  ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
  spin_unlock_irqrestore(&ndlp->lock, iflags);
 }
 ndlp->nlp_flag &= ~NLP_UNREG_INP;
}

void
lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
 __lpfc_sli_rpi_release(vport, ndlp);
}

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
 struct lpfc_vport *vport = pmb->vport;
 struct lpfc_dmabuf *mp;
 struct lpfc_nodelist *ndlp;
 struct Scsi_Host *shost;
 uint16_t rpi, vpi;
 int rc;

 /*
  * If a REG_LOGIN succeeded after the node was destroyed or the
  * node is in re-discovery, the driver needs to clean up the RPI.
  */
 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
     pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
     !pmb->u.mb.mbxStatus) {
  mp = pmb->ctx_buf;
  if (mp) {
   pmb->ctx_buf = NULL;
   lpfc_mbuf_free(phba, mp->virt, mp->phys);
   kfree(mp);
  }
  rpi = pmb->u.mb.un.varWords[0];
  vpi = pmb->u.mb.un.varRegLogin.vpi;
  if (phba->sli_rev == LPFC_SLI_REV4)
   vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
  lpfc_unreg_login(phba, vpi, rpi, pmb);
  pmb->vport = vport;
  pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
  rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
  if (rc != MBX_NOT_FINISHED)
   return;
 }

 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
     !test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
     !pmb->u.mb.mbxStatus) {
  shost = lpfc_shost_from_vport(vport);
  spin_lock_irq(shost->host_lock);
  vport->vpi_state |= LPFC_VPI_REGISTERED;
  spin_unlock_irq(shost->host_lock);
  clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
 }

 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
  ndlp = pmb->ctx_ndlp;
  lpfc_nlp_put(ndlp);
 }

 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
  ndlp = pmb->ctx_ndlp;

  /* Check to see if there are any deferred events to process */
  if (ndlp) {
   lpfc_printf_vlog(
    vport,
    KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
    "1438 UNREG cmpl deferred mbox x%x "
    "on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
    ndlp->nlp_rpi, ndlp->nlp_DID,
    ndlp->nlp_flag, ndlp->nlp_defer_did,
    ndlp, vport->load_flag, kref_read(&ndlp->kref));

   if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
       (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
    ndlp->nlp_flag &= ~NLP_UNREG_INP;
    ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
    lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
   } else {
    __lpfc_sli_rpi_release(vport, ndlp);
   }

   /* The unreg_login mailbox is complete and had a
    * reference that has to be released. The PLOGI
    * got its own ref.
    */
   lpfc_nlp_put(ndlp);
   pmb->ctx_ndlp = NULL;
  }
 }

 /* This nlp_put pairs with lpfc_sli4_resume_rpi */
 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
  ndlp = pmb->ctx_ndlp;
  lpfc_nlp_put(ndlp);
 }

 /* Check security permission status on INIT_LINK mailbox command */
 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
     (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
  lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
    "2860 SLI authentication is required "
    "for INIT_LINK but has not done yet\n");

 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
  lpfc_sli4_mbox_cmd_free(phba, pmb);
 else
  lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
 struct lpfc_vport *vport = pmb->vport;
 struct lpfc_nodelist *ndlp;

 ndlp = pmb->ctx_ndlp;
 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
  if (phba->sli_rev == LPFC_SLI_REV4 &&
      (bf_get(lpfc_sli_intf_if_type,
       &phba->sli4_hba.sli_intf) >=
       LPFC_SLI_INTF_IF_TYPE_2)) {
   if (ndlp) {
    lpfc_printf_vlog(
     vport, KERN_INFO,
     LOG_MBOX | LOG_SLI | LOG_NODE,
     "0010 UNREG_LOGIN vpi:x%x "
     "rpi:%x DID:%x defer x%x flg x%x "
     "x%px\n",
     vport->vpi, ndlp->nlp_rpi,
     ndlp->nlp_DID, ndlp->nlp_defer_did,
     ndlp->nlp_flag,
     ndlp);
    ndlp->nlp_flag &= ~NLP_LOGO_ACC;

    /* Check to see if there are any deferred
     * events to process
     */
    if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
        (ndlp->nlp_defer_did !=
        NLP_EVT_NOTHING_PENDING)) {
     lpfc_printf_vlog(
      vport, KERN_INFO,
      LOG_MBOX | LOG_SLI | LOG_NODE,
      "4111 UNREG cmpl deferred "
      "clr x%x on "
      "NPort x%x Data: x%x x%px\n",
      ndlp->nlp_rpi, ndlp->nlp_DID,
      ndlp->nlp_defer_did, ndlp);
     ndlp->nlp_flag &= ~NLP_UNREG_INP;
     ndlp->nlp_defer_did =
      NLP_EVT_NOTHING_PENDING;
     lpfc_issue_els_plogi(
      vport, ndlp->nlp_DID, 0);
    } else {
     __lpfc_sli_rpi_release(vport, ndlp);
    }
    lpfc_nlp_put(ndlp);
   }
  }
 }

 mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes the mailbox completion interrupt,
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 * returns the completed mailbox commands in the mboxq_cmpl queue to the
 * upper layers. This function returns the mailbox commands to the upper
 * layer by calling the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
 MAILBOX_t *pmbox;
 LPFC_MBOXQ_t *pmb;
 int rc;
 LIST_HEAD(cmplq);

 phba->sli.slistat.mbox_event++;

 /* Get all completed mailbox buffers into the cmplq */
 spin_lock_irq(&phba->hbalock);
 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
 spin_unlock_irq(&phba->hbalock);

 /* Get a Mailbox buffer to setup mailbox commands for callback */
 do {
  list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
  if (pmb == NULL)
   break;

  pmbox = &pmb->u.mb;

  if (pmbox->mbxCommand != MBX_HEARTBEAT) {
   if (pmb->vport) {
    lpfc_debugfs_disc_trc(pmb->vport,
     LPFC_DISC_TRC_MBOX_VPORT,
     "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
     (uint32_t)pmbox->mbxCommand,
     pmbox->un.varWords[0],
     pmbox->un.varWords[1]);
   } else {
    lpfc_debugfs_disc_trc(phba->pport,
     LPFC_DISC_TRC_MBOX,
     "MBOX cmpl: cmd:x%x mb:x%x x%x",
     (uint32_t)pmbox->mbxCommand,
     pmbox->un.varWords[0],
     pmbox->un.varWords[1]);
   }
  }

  /*
   * It is a fatal error if an unknown mbox command completes.
   */
  if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
      MBX_SHUTDOWN) {
   /* Unknown mailbox command compl */
   lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
     "(%d):0323 Unknown Mailbox command "
     "x%x (x%x/x%x) Cmpl\n",
     pmb->vport ? pmb->vport->vpi :
     LPFC_VPORT_UNKNOWN,
     pmbox->mbxCommand,
     lpfc_sli_config_mbox_subsys_get(phba,
         pmb),
     lpfc_sli_config_mbox_opcode_get(phba,
         pmb));
   phba->link_state = LPFC_HBA_ERROR;
   phba->work_hs = HS_FFER3;
   lpfc_handle_eratt(phba);
   continue;
  }

  if (pmbox->mbxStatus) {
   phba->sli.slistat.mbox_stat_err++;
   if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
    /* Mbox cmd cmpl error - RETRYing */
    lpfc_printf_log(phba, KERN_INFO,
      LOG_MBOX | LOG_SLI,
      "(%d):0305 Mbox cmd cmpl "
      "error - RETRYing Data: x%x "
      "(x%x/x%x) x%x x%x x%x\n",
      pmb->vport ? pmb->vport->vpi :
      LPFC_VPORT_UNKNOWN,
      pmbox->mbxCommand,
      lpfc_sli_config_mbox_subsys_get(phba,
          pmb),
      lpfc_sli_config_mbox_opcode_get(phba,
          pmb),
      pmbox->mbxStatus,
      pmbox->un.varWords[0],
      pmb->vport ? pmb->vport->port_state :
      LPFC_VPORT_UNKNOWN);
    pmbox->mbxStatus = 0;
    pmbox->mbxOwner = OWN_HOST;
    rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
    if (rc != MBX_NOT_FINISHED)
     continue;
   }
  }

  /* Mailbox cmd <cmd> Cmpl <cmpl> */
  lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
    "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
    "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
    "x%x x%x x%x\n",
    pmb->vport ? pmb->vport->vpi : 0,
    pmbox->mbxCommand,
    lpfc_sli_config_mbox_subsys_get(phba, pmb),
    lpfc_sli_config_mbox_opcode_get(phba, pmb),
    pmb->mbox_cmpl,
    *((uint32_t *) pmbox),
    pmbox->un.varWords[0],
    pmbox->un.varWords[1],
    pmbox->un.varWords[2],
    pmbox->un.varWords[3],
    pmbox->un.varWords[4],
    pmbox->un.varWords[5],
    pmbox->un.varWords[6],
    pmbox->un.varWords[7],
    pmbox->un.varWords[8],
    pmbox->un.varWords[9],
    pmbox->un.varWords[10]);

  if (pmb->mbox_cmpl)
   pmb->mbox_cmpl(phba, pmb);
 } while (1);
 return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer was posted for a particular exchange and
 * the function returns the buffer without replacing it.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
    struct lpfc_sli_ring *pring,
    uint32_t tag)
{
 struct hbq_dmabuf *hbq_entry;

 if (tag & QUE_BUFTAG_BIT)
  return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
 if (!hbq_entry)
  return NULL;
 return &hbq_entry->dbuf;
}

/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
 *                              containing a NVME LS request.
 * @phba: pointer to lpfc hba data structure.
 * @piocb: pointer to the iocbq struct representing the sequence starting
 *         frame.
 *
 * This routine initially validates the NVME LS, validates there is a login
 * with the port that sent the LS, and then calls the appropriate nvme host
 * or target LS request handler.
 **/
static void
lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
 struct lpfc_nodelist *ndlp;
 struct lpfc_dmabuf *d_buf;
 struct hbq_dmabuf *nvmebuf;
 struct fc_frame_header *fc_hdr;
 struct lpfc_async_xchg_ctx *axchg = NULL;
 char *failwhy = NULL;
 uint32_t oxid, sid, did, fctl, size;
 int ret = 1;

 d_buf = piocb->cmd_dmabuf;

 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 fc_hdr = nvmebuf->hbuf.virt;
 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
 sid = sli4_sid_from_fc_hdr(fc_hdr);
 did = sli4_did_from_fc_hdr(fc_hdr);
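 /* Reassemble the 24-bit F_CTL field from its three bytes in the
  * FC frame header.
  */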
3232 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3233 fc_hdr->fh_f_ctl[1] << 8 |
3234 fc_hdr->fh_f_ctl[2]);
3235 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3236
3237 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3238 oxid, size, sid);
3239
3240 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
3241 failwhy = "Driver Unloading";
3242 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3243 failwhy = "NVME FC4 Disabled";
3244 } else if (!phba->nvmet_support && !phba->pport->localport) {
3245 failwhy = "No Localport";
3246 } else if (phba->nvmet_support && !phba->targetport) {
3247 failwhy = "No Targetport";
3248 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3249 failwhy = "Bad NVME LS R_CTL";
3250 } else if (unlikely((fctl & 0x00FF0000) !=
3251 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3252 failwhy = "Bad NVME LS F_CTL";
3253 } else {
3254 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3255 if (!axchg)
3256 failwhy = "No CTX memory";
3257 }
3258
3259 if (unlikely(failwhy)) {
3260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3261 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3262 sid, oxid, failwhy);
3263 goto out_fail;
3264 }
3265
3266 /* validate the source of the LS is logged in */
3267 ndlp = lpfc_findnode_did(phba->pport, sid);
3268 if (!ndlp ||
3269 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3270 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3271 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3272 "6216 NVME Unsol rcv: No ndlp: "
3273 "NPort_ID x%x oxid x%x\n",
3274 sid, oxid);
3275 goto out_fail;
3276 }
3277
3278 axchg->phba = phba;
3279 axchg->ndlp = ndlp;
3280 axchg->size = size;
3281 axchg->oxid = oxid;
3282 axchg->sid = sid;
3283 axchg->wqeq = NULL;
3284 axchg->state = LPFC_NVME_STE_LS_RCV;
3285 axchg->entry_cnt = 1;
3286 axchg->rqb_buffer = (void *)nvmebuf;
3287 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3288 axchg->payload = nvmebuf->dbuf.virt;
3289 INIT_LIST_HEAD(&axchg->list);
3290
3291 if (phba->nvmet_support) {
3292 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3293 spin_lock_irq(&ndlp->lock);
3294 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3295 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3296 spin_unlock_irq(&ndlp->lock);
3297
3298 /* This reference is a single occurrence to hold the
3299 * node valid until the nvmet transport calls
3300 * host_release.
3301 */
3302 if (!lpfc_nlp_get(ndlp))
3303 goto out_fail;
3304
3305 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3306 "6206 NVMET unsol ls_req ndlp x%px "
3307 "DID x%x xflags x%x refcnt %d\n",
3308 ndlp, ndlp->nlp_DID,
3309 ndlp->fc4_xpt_flags,
3310 kref_read(&ndlp->kref));
3311 } else {
3312 spin_unlock_irq(&ndlp->lock);
3313 }
3314 } else {
3315 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3316 }
3317
3318 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3319 if (!ret)
3320 return;
3321
3322out_fail:
3323 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3324 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3325 "NVMe%s handler failed %d\n",
3326 did, sid, oxid,
3327 (phba->nvmet_support) ? "T" : "I", ret);
3328
3329 /* recycle receive buffer */
3330 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3331
3332 /* If start of new exchange, abort it */
3333 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3334 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3335
3336 if (ret)
3337 kfree(axchg);
3338}
3339
3340/**
3341 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3342 * @phba: Pointer to HBA context object.
3343 * @pring: Pointer to driver SLI ring object.
3344 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3345 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3346 * @fch_type: the type for the first frame of the sequence.
3347 *
3348 * This function is called with no lock held. This function uses the r_ctl and
3349 * type of the received sequence to find the correct callback function to call
3350 * to process the sequence.
3351 **/
3352static int
3353lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3354 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3355 uint32_t fch_type)
3356{
3357 int i;
3358
3359 switch (fch_type) {
3360 case FC_TYPE_NVME:
3361 lpfc_nvme_unsol_ls_handler(phba, saveq);
3362 return 1;
3363 default:
3364 break;
3365 }
3366
3367 /* unSolicited Responses */
3368 if (pring->prt[0].profile) {
3369 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3370 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3371 saveq);
3372 return 1;
3373 }
3374 /* We must search, based on rctl / type
3375 for the right routine */
3376 for (i = 0; i < pring->num_mask; i++) {
3377 if ((pring->prt[i].rctl == fch_r_ctl) &&
3378 (pring->prt[i].type == fch_type)) {
3379 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3380 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3381 (phba, pring, saveq);
3382 return 1;
3383 }
3384 }
3385 return 0;
3386}
3387
3388static void
3389lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3390 struct lpfc_iocbq *saveq)
3391{
3392 IOCB_t *irsp;
3393 union lpfc_wqe128 *wqe;
3394 u16 i = 0;
3395
3396 irsp = &saveq->iocb;
3397 wqe = &saveq->wqe;
3398
3399 /* Fill wcqe with the IOCB status fields */
3400 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3401 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3402 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3403 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3404
3405 /* Source ID */
3406 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3407
3408 /* rx-id of the response frame */
3409 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3410
3411 /* ox-id of the frame */
3412 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3413 irsp->unsli3.rcvsli3.ox_id);
3414
3415 /* DID */
3416 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3417 irsp->un.rcvels.remoteID);
3418
3419 /* unsol data len */
3420 for (i = 0; i < irsp->ulpBdeCount; i++) {
3421 struct lpfc_hbq_entry *hbqe = NULL;
3422
3423 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3424 if (i == 0) {
3425 hbqe = (struct lpfc_hbq_entry *)
3426 &irsp->un.ulpWord[0];
3427 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3428 hbqe->bde.tus.f.bdeSize;
3429 } else if (i == 1) {
3430 hbqe = (struct lpfc_hbq_entry *)
3431 &irsp->unsli3.sli3Words[4];
3432 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3433 }
3434 }
3435 }
3436}
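
/*
 * For reference, the SLI3 IOCB fields consumed by lpfc_sli_prep_unsol_wqe()
 * map onto the SLI4-style WQE/WCQE roughly as follows (derived from the
 * assignments above):
 *
 *	irsp->ulpStatus              -> wcqe_cmpl completion status
 *	irsp->un.ulpWord[4]          -> wcqe_cmpl.parameter
 *	irsp->unsli3.rcvsli3.acc_len -> wcqe_cmpl.total_data_placed
 *	irsp->un.rcvels.parmRo       -> els_rsp64_sid (source ID)
 *	irsp->ulpContext             -> wqe_ctxt_tag (rx-id of the frame)
 *	irsp->unsli3.rcvsli3.ox_id   -> wqe_rcvoxid (ox-id of the frame)
 *	irsp->un.rcvels.remoteID     -> wqe_els_did (DID)
 */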
3437
3438/**
3439 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3440 * @phba: Pointer to HBA context object.
3441 * @pring: Pointer to driver SLI ring object.
3442 * @saveq: Pointer to the unsolicited iocb.
3443 *
3444 * This function is called with no lock held by the ring event handler
3445 * when there is an unsolicited iocb posted to the response ring by the
3446 * firmware. This function gets the buffer associated with the iocbs
3447 * and calls the event handler for the ring. This function handles both
3448 * qring buffers and hbq buffers.
3449 * When the function returns 1, the caller can free the iocb object;
3450 * otherwise, upper-layer functions will free the iocb objects.
3451 **/
3452static int
3453lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3454 struct lpfc_iocbq *saveq)
3455{
3456	IOCB_t *irsp;
3457	WORD5 *w5p;
3458 dma_addr_t paddr;
3459 uint32_t Rctl, Type;
3460 struct lpfc_iocbq *iocbq;
3461 struct lpfc_dmabuf *dmzbuf;
3462
3463 irsp = &saveq->iocb;
3464 saveq->vport = phba->pport;
3465
3466 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3467 if (pring->lpfc_sli_rcv_async_status)
3468 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3469 else
3470 lpfc_printf_log(phba,
3471 KERN_WARNING,
3472 LOG_SLI,
3473 "0316 Ring %d handler: unexpected "
3474 "ASYNC_STATUS iocb received evt_code "
3475 "0x%x\n",
3476 pring->ringno,
3477 irsp->un.asyncstat.evt_code);
3478 return 1;
3479 }
3480
3481 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3482 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3483 if (irsp->ulpBdeCount > 0) {
3484 dmzbuf = lpfc_sli_get_buff(phba, pring,
3485 irsp->un.ulpWord[3]);
3486 lpfc_in_buf_free(phba, dmzbuf);
3487 }
3488
3489 if (irsp->ulpBdeCount > 1) {
3490 dmzbuf = lpfc_sli_get_buff(phba, pring,
3491 irsp->unsli3.sli3Words[3]);
3492 lpfc_in_buf_free(phba, dmzbuf);
3493 }
3494
3495 if (irsp->ulpBdeCount > 2) {
3496 dmzbuf = lpfc_sli_get_buff(phba, pring,
3497 irsp->unsli3.sli3Words[7]);
3498 lpfc_in_buf_free(phba, dmzbuf);
3499 }
3500
3501 return 1;
3502 }
3503
3504 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3505 if (irsp->ulpBdeCount != 0) {
3506 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3507 irsp->un.ulpWord[3]);
3508 if (!saveq->cmd_dmabuf)
3509 lpfc_printf_log(phba,
3510 KERN_ERR,
3511 LOG_SLI,
3512 "0341 Ring %d Cannot find buffer for "
3513 "an unsolicited iocb. tag 0x%x\n",
3514 pring->ringno,
3515 irsp->un.ulpWord[3]);
3516 }
3517 if (irsp->ulpBdeCount == 2) {
3518 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3519 irsp->unsli3.sli3Words[7]);
3520 if (!saveq->bpl_dmabuf)
3521 lpfc_printf_log(phba,
3522 KERN_ERR,
3523 LOG_SLI,
3524 "0342 Ring %d Cannot find buffer for an"
3525 " unsolicited iocb. tag 0x%x\n",
3526 pring->ringno,
3527 irsp->unsli3.sli3Words[7]);
3528 }
3529 list_for_each_entry(iocbq, &saveq->list, list) {
3530 irsp = &iocbq->iocb;
3531 if (irsp->ulpBdeCount != 0) {
3532 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3533 pring,
3534 irsp->un.ulpWord[3]);
3535 if (!iocbq->cmd_dmabuf)
3536 lpfc_printf_log(phba,
3537 KERN_ERR,
3538 LOG_SLI,
3539 "0343 Ring %d Cannot find "
3540 "buffer for an unsolicited iocb"
3541 ". tag 0x%x\n", pring->ringno,
3542 irsp->un.ulpWord[3]);
3543 }
3544 if (irsp->ulpBdeCount == 2) {
3545 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3546 pring,
3547 irsp->unsli3.sli3Words[7]);
3548 if (!iocbq->bpl_dmabuf)
3549 lpfc_printf_log(phba,
3550 KERN_ERR,
3551 LOG_SLI,
3552 "0344 Ring %d Cannot find "
3553 "buffer for an unsolicited "
3554 "iocb. tag 0x%x\n",
3555 pring->ringno,
3556 irsp->unsli3.sli3Words[7]);
3557 }
3558 }
3559 } else {
3560 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3561 irsp->un.cont64[0].addrLow);
3562 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3563 paddr);
3564 if (irsp->ulpBdeCount == 2) {
3565 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3566 irsp->un.cont64[1].addrLow);
3567 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3568 pring,
3569 paddr);
3570 }
3571 }
3572
3573 if (irsp->ulpBdeCount != 0 &&
3574 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3575 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3576 int found = 0;
3577
3578		/* Search the continuation save queue for the same XRI */
3579 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3580 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3581 saveq->iocb.unsli3.rcvsli3.ox_id) {
3582 list_add_tail(&saveq->list, &iocbq->list);
3583 found = 1;
3584 break;
3585 }
3586 }
3587 if (!found)
3588 list_add_tail(&saveq->clist,
3589 &pring->iocb_continue_saveq);
3590
3591 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3592 list_del_init(&iocbq->clist);
3593 saveq = iocbq;
3594 irsp = &saveq->iocb;
3595 } else {
3596 return 0;
3597 }
3598 }
3599 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3600 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3601 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3602 Rctl = FC_RCTL_ELS_REQ;
3603 Type = FC_TYPE_ELS;
3604 } else {
3605 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3606 Rctl = w5p->hcsw.Rctl;
3607 Type = w5p->hcsw.Type;
3608
3609 /* Firmware Workaround */
3610 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3611 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3612 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3613 Rctl = FC_RCTL_ELS_REQ;
3614 Type = FC_TYPE_ELS;
3615 w5p->hcsw.Rctl = Rctl;
3616 w5p->hcsw.Type = Type;
3617 }
3618 }
3619
3620 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3621 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3622 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3623 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3624 saveq->vport = phba->pport;
3625 else
3626 saveq->vport = lpfc_find_vport_by_vpid(phba,
3627 irsp->unsli3.rcvsli3.vpi);
3628 }
3629
3630 /* Prepare WQE with Unsol frame */
3631 lpfc_sli_prep_unsol_wqe(phba, saveq);
3632
3633 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3634 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3635 "0313 Ring %d handler: unexpected Rctl x%x "
3636 "Type x%x received\n",
3637 pring->ringno, Rctl, Type);
3638
3639 return 1;
3640}
3641
3642/**
3643 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3644 * @phba: Pointer to HBA context object.
3645 * @pring: Pointer to driver SLI ring object.
3646 * @prspiocb: Pointer to response iocb object.
3647 *
3648 * This function looks up the iocb_lookup table to get the command iocb
3649 * corresponding to the given response iocb using the iotag of the
3650 * response iocb. The driver calls this function with the hbalock held
3651 * for SLI3 ports or the ring lock held for SLI4 ports.
3652 * This function returns the command iocb object if it finds the command
3653 * iocb else returns NULL.
3654 **/
3655static struct lpfc_iocbq *
3656lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3657 struct lpfc_sli_ring *pring,
3658 struct lpfc_iocbq *prspiocb)
3659{
3660 struct lpfc_iocbq *cmd_iocb = NULL;
3661 u16 iotag;
3662
3663 if (phba->sli_rev == LPFC_SLI_REV4)
3664 iotag = get_wqe_reqtag(prspiocb);
3665 else
3666 iotag = prspiocb->iocb.ulpIoTag;
3667
3668 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3669 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3670 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3671 /* remove from txcmpl queue list */
3672 list_del_init(&cmd_iocb->list);
3673 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3674 pring->txcmplq_cnt--;
3675 return cmd_iocb;
3676 }
3677 }
3678
3679 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3680 "0317 iotag x%x is out of "
3681 "range: max iotag x%x\n",
3682 iotag, phba->sli.last_iotag);
3683 return NULL;
3684}
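
/*
 * Illustrative sketch (not compiled driver code): iotags are assigned
 * when a command iocb is issued and index directly into the
 * phba->sli.iocbq_lookup array, which makes the completion-time lookup
 * above an O(1) array access:
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 *
 * The LPFC_IO_ON_TXCMPLQ test guards against iotags that are stale or
 * that no longer have an outstanding command on the txcmplq.
 */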
3685
3686/**
3687 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3688 * @phba: Pointer to HBA context object.
3689 * @pring: Pointer to driver SLI ring object.
3690 * @iotag: IOCB tag.
3691 *
3692 * This function looks up the iocb_lookup table to get the command iocb
3693 * corresponding to the given iotag. The driver calls this function with
3694 * the ring lock held because this function is an SLI4 port only helper.
3695 * This function returns the command iocb object if it finds the command
3696 * iocb else returns NULL.
3697 **/
3698static struct lpfc_iocbq *
3699lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3700 struct lpfc_sli_ring *pring, uint16_t iotag)
3701{
3702 struct lpfc_iocbq *cmd_iocb = NULL;
3703
3704 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3705 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3706 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3707 /* remove from txcmpl queue list */
3708 list_del_init(&cmd_iocb->list);
3709 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3710 pring->txcmplq_cnt--;
3711 return cmd_iocb;
3712 }
3713 }
3714
3715 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3716 "0372 iotag x%x lookup error: max iotag (x%x) "
3717 "cmd_flag x%x\n",
3718 iotag, phba->sli.last_iotag,
3719 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3720 return NULL;
3721}
3722
3723/**
3724 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3725 * @phba: Pointer to HBA context object.
3726 * @pring: Pointer to driver SLI ring object.
3727 * @saveq: Pointer to the response iocb to be processed.
3728 *
3729 * This function is called by the ring event handler for non-fcp
3730 * rings when there is a new response iocb in the response ring.
3731 * The caller is not required to hold any locks. This function
3732 * gets the command iocb associated with the response iocb and
3733 * calls the completion handler for the command iocb. If there
3734 * is no completion handler, the function will free the resources
3735 * associated with command iocb. If the response iocb is for
3736 * an already aborted command iocb, the status of the completion
3737 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3738 * This function always returns 1.
3739 **/
3740static int
3741lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3742 struct lpfc_iocbq *saveq)
3743{
3744 struct lpfc_iocbq *cmdiocbp;
3745 unsigned long iflag;
3746 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3747
3748 if (phba->sli_rev == LPFC_SLI_REV4)
3749 spin_lock_irqsave(&pring->ring_lock, iflag);
3750 else
3751 spin_lock_irqsave(&phba->hbalock, iflag);
3752 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3753 if (phba->sli_rev == LPFC_SLI_REV4)
3754 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3755 else
3756 spin_unlock_irqrestore(&phba->hbalock, iflag);
3757
3758 ulp_command = get_job_cmnd(phba, saveq);
3759 ulp_status = get_job_ulpstatus(phba, saveq);
3760 ulp_word4 = get_job_word4(phba, saveq);
3761 ulp_context = get_job_ulpcontext(phba, saveq);
3762 if (phba->sli_rev == LPFC_SLI_REV4)
3763 iotag = get_wqe_reqtag(saveq);
3764 else
3765 iotag = saveq->iocb.ulpIoTag;
3766
3767 if (cmdiocbp) {
3768 ulp_command = get_job_cmnd(phba, cmdiocbp);
3769 if (cmdiocbp->cmd_cmpl) {
3770 /*
3771 * If an ELS command failed send an event to mgmt
3772 * application.
3773 */
3774 if (ulp_status &&
3775 (pring->ringno == LPFC_ELS_RING) &&
3776 (ulp_command == CMD_ELS_REQUEST64_CR))
3777 lpfc_send_els_failure_event(phba,
3778 cmdiocbp, saveq);
3779
3780 /*
3781 * Post all ELS completions to the worker thread.
3782 * All other are passed to the completion callback.
3783 */
3784 if (pring->ringno == LPFC_ELS_RING) {
3785 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3786 (cmdiocbp->cmd_flag &
3787 LPFC_DRIVER_ABORTED)) {
3788 spin_lock_irqsave(&phba->hbalock,
3789 iflag);
3790 cmdiocbp->cmd_flag &=
3791 ~LPFC_DRIVER_ABORTED;
3792 spin_unlock_irqrestore(&phba->hbalock,
3793 iflag);
3794 saveq->iocb.ulpStatus =
3795 IOSTAT_LOCAL_REJECT;
3796 saveq->iocb.un.ulpWord[4] =
3797 IOERR_SLI_ABORTED;
3798
3799 /* Firmware could still be in progress
3800 * of DMAing payload, so don't free data
3801 * buffer till after a hbeat.
3802 */
3803 spin_lock_irqsave(&phba->hbalock,
3804 iflag);
3805 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3806 spin_unlock_irqrestore(&phba->hbalock,
3807 iflag);
3808 }
3809 if (phba->sli_rev == LPFC_SLI_REV4) {
3810 if (saveq->cmd_flag &
3811 LPFC_EXCHANGE_BUSY) {
3812 /* Set cmdiocb flag for the
3813 * exchange busy so sgl (xri)
3814 * will not be released until
3815 * the abort xri is received
3816 * from hba.
3817 */
3818 spin_lock_irqsave(
3819 &phba->hbalock, iflag);
3820 cmdiocbp->cmd_flag |=
3821 LPFC_EXCHANGE_BUSY;
3822 spin_unlock_irqrestore(
3823 &phba->hbalock, iflag);
3824 }
3825 if (cmdiocbp->cmd_flag &
3826 LPFC_DRIVER_ABORTED) {
3827 /*
3828 * Clear LPFC_DRIVER_ABORTED
3829 * bit in case it was driver
3830 * initiated abort.
3831 */
3832 spin_lock_irqsave(
3833 &phba->hbalock, iflag);
3834 cmdiocbp->cmd_flag &=
3835 ~LPFC_DRIVER_ABORTED;
3836 spin_unlock_irqrestore(
3837 &phba->hbalock, iflag);
3838 set_job_ulpstatus(cmdiocbp,
3839 IOSTAT_LOCAL_REJECT);
3840 set_job_ulpword4(cmdiocbp,
3841 IOERR_ABORT_REQUESTED);
3842 /*
3843 * For SLI4, irspiocb contains
3844 * NO_XRI in sli_xritag, it
3845 * shall not affect releasing
3846 * sgl (xri) process.
3847 */
3848 set_job_ulpstatus(saveq,
3849 IOSTAT_LOCAL_REJECT);
3850 set_job_ulpword4(saveq,
3851 IOERR_SLI_ABORTED);
3852 spin_lock_irqsave(
3853 &phba->hbalock, iflag);
3854 saveq->cmd_flag |=
3855 LPFC_DELAY_MEM_FREE;
3856 spin_unlock_irqrestore(
3857 &phba->hbalock, iflag);
3858 }
3859 }
3860 }
3861 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3862 } else
3863 lpfc_sli_release_iocbq(phba, cmdiocbp);
3864 } else {
3865 /*
3866 * Unknown initiating command based on the response iotag.
3867 * This could be the case on the ELS ring because of
3868 * lpfc_els_abort().
3869 */
3870 if (pring->ringno != LPFC_ELS_RING) {
3871 /*
3872 * Ring <ringno> handler: unexpected completion IoTag
3873 * <IoTag>
3874 */
3875 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3876 "0322 Ring %d handler: "
3877 "unexpected completion IoTag x%x "
3878 "Data: x%x x%x x%x x%x\n",
3879 pring->ringno, iotag, ulp_status,
3880 ulp_word4, ulp_command, ulp_context);
3881 }
3882 }
3883
3884 return 1;
3885}
3886
3887/**
3888 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3889 * @phba: Pointer to HBA context object.
3890 * @pring: Pointer to driver SLI ring object.
3891 *
3892 * This function is called from the iocb ring event handlers when the
3893 * put pointer is ahead of the get pointer for a ring. This function signals
3894 * an error attention condition to the worker thread, and the worker
3895 * thread will transition the HBA to the offline state.
3896 **/
3897static void
3898lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3899{
3900 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3901 /*
3902 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3903 * rsp ring <portRspMax>
3904 */
3905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3906 "0312 Ring %d handler: portRspPut %d "
3907 "is bigger than rsp ring %d\n",
3908 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3909 pring->sli.sli3.numRiocb);
3910
3911 phba->link_state = LPFC_HBA_ERROR;
3912
3913 /*
3914 * All error attention handlers are posted to
3915 * worker thread
3916 */
3917 phba->work_ha |= HA_ERATT;
3918 phba->work_hs = HS_FFER3;
3919
3920 lpfc_worker_wake_up(phba);
3921
3922 return;
3923}
3924
3925/**
3926 * lpfc_poll_eratt - Error attention polling timer timeout handler
3927 * @t: Pointer to the timer_list from which the HBA context object is fetched.
3928 *
3929 * This function is invoked by the Error Attention polling timer when the
3930 * timer times out. It will check the SLI Error Attention register for
3931 * possible attention events. If so, it will post an Error Attention event
3932 * and wake up worker thread to process it. Otherwise, it will set up the
3933 * Error Attention polling timer for the next poll.
3934 **/
3935void lpfc_poll_eratt(struct timer_list *t)
3936{
3937 struct lpfc_hba *phba;
3938 uint32_t eratt = 0;
3939 uint64_t sli_intr, cnt;
3940
3941 phba = from_timer(phba, t, eratt_poll);
3942 if (!test_bit(HBA_SETUP, &phba->hba_flag))
3943 return;
3944
3945 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
3946 return;
3947
3948	/* Also keep track of the HBA's interrupts per second here */
3949 sli_intr = phba->sli.slistat.sli_intr;
3950
3951 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3952 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3953 sli_intr);
3954 else
3955 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3956
3957 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3958 do_div(cnt, phba->eratt_poll_interval);
3959 phba->sli.slistat.sli_ips = cnt;
3960
3961 phba->sli.slistat.sli_prev_intr = sli_intr;
3962
3963 /* Check chip HA register for error event */
3964 eratt = lpfc_sli_check_eratt(phba);
3965
3966 if (eratt)
3967 /* Tell the worker thread there is work to do */
3968 lpfc_worker_wake_up(phba);
3969 else
3970 /* Restart the timer for next eratt poll */
3971 mod_timer(&phba->eratt_poll,
3972 jiffies +
3973 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3974 return;
3975}
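
/*
 * Note on the do_div() call above (illustrative example only): do_div()
 * divides its 64-bit first argument in place by a 32-bit divisor and
 * returns the remainder, which is why the quotient is read back from
 * "cnt" itself rather than from the macro's return value:
 *
 *	u64 n = 10000;
 *	u32 rem = do_div(n, 3);	// afterwards n == 3333, rem == 1
 */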
3976
3977
3978/**
3979 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3980 * @phba: Pointer to HBA context object.
3981 * @pring: Pointer to driver SLI ring object.
3982 * @mask: Host attention register mask for this ring.
3983 *
3984 * This function is called from the interrupt context when there is a ring
3985 * event for the fcp ring. The caller does not hold any lock.
3986 * The function processes each response iocb in the response ring, chaining
3987 * iocbs together until it finds an iocb with the LE bit set. The function
3988 * calls the completion handler of the command iocb if the response iocb
3989 * indicates a completion for a command iocb or an abort completion, and it
3990 * calls the lpfc_sli_process_unsol_iocb function if this is an
3991 * unsolicited iocb.
3992 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3993 * to check it explicitly.
3994 */
3995int
3996lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3997 struct lpfc_sli_ring *pring, uint32_t mask)
3998{
3999 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
4000 IOCB_t *irsp = NULL;
4001 IOCB_t *entry = NULL;
4002 struct lpfc_iocbq *cmdiocbq = NULL;
4003 struct lpfc_iocbq rspiocbq;
4004 uint32_t status;
4005 uint32_t portRspPut, portRspMax;
4006 int rc = 1;
4007 lpfc_iocb_type type;
4008 unsigned long iflag;
4009 uint32_t rsp_cmpl = 0;
4010
4011 spin_lock_irqsave(&phba->hbalock, iflag);
4012 pring->stats.iocb_event++;
4013
4014 /*
4015 * The next available response entry should never exceed the maximum
4016 * entries. If it does, treat it as an adapter hardware error.
4017 */
4018 portRspMax = pring->sli.sli3.numRiocb;
4019 portRspPut = le32_to_cpu(pgp->rspPutInx);
4020 if (unlikely(portRspPut >= portRspMax)) {
4021 lpfc_sli_rsp_pointers_error(phba, pring);
4022 spin_unlock_irqrestore(&phba->hbalock, iflag);
4023 return 1;
4024 }
4025	if (phba->fcp_ring_in_use) {
4026		spin_unlock_irqrestore(&phba->hbalock, iflag);
4027		return 1;
4028	}
4029	phba->fcp_ring_in_use = 1;
4030
4031 rmb();
4032 while (pring->sli.sli3.rspidx != portRspPut) {
4033 /*
4034 * Fetch an entry off the ring and copy it into a local data
4035 * structure. The copy involves a byte-swap since the
4036 * network byte order and pci byte orders are different.
4037 */
4038 entry = lpfc_resp_iocb(phba, pring);
4039 phba->last_completion_time = jiffies;
4040
4041 if (++pring->sli.sli3.rspidx >= portRspMax)
4042 pring->sli.sli3.rspidx = 0;
4043
4044 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4045 (uint32_t *) &rspiocbq.iocb,
4046 phba->iocb_rsp_size);
4047 INIT_LIST_HEAD(&(rspiocbq.list));
4048 irsp = &rspiocbq.iocb;
4049
4050 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4051 pring->stats.iocb_rsp++;
4052 rsp_cmpl++;
4053
4054 if (unlikely(irsp->ulpStatus)) {
4055 /*
4056 * If resource errors reported from HBA, reduce
4057 * queuedepths of the SCSI device.
4058 */
4059 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4060 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4061 IOERR_NO_RESOURCES)) {
4062 spin_unlock_irqrestore(&phba->hbalock, iflag);
4063 phba->lpfc_rampdown_queue_depth(phba);
4064 spin_lock_irqsave(&phba->hbalock, iflag);
4065 }
4066
4067 /* Rsp ring <ringno> error: IOCB */
4068 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4069 "0336 Rsp Ring %d error: IOCB Data: "
4070 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4071 pring->ringno,
4072 irsp->un.ulpWord[0],
4073 irsp->un.ulpWord[1],
4074 irsp->un.ulpWord[2],
4075 irsp->un.ulpWord[3],
4076 irsp->un.ulpWord[4],
4077 irsp->un.ulpWord[5],
4078 *(uint32_t *)&irsp->un1,
4079 *((uint32_t *)&irsp->un1 + 1));
4080 }
4081
4082 switch (type) {
4083 case LPFC_ABORT_IOCB:
4084 case LPFC_SOL_IOCB:
4085 /*
4086 * Idle exchange closed via ABTS from port. No iocb
4087 * resources need to be recovered.
4088 */
4089 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4090 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4091 "0333 IOCB cmd 0x%x"
4092 " processed. Skipping"
4093 " completion\n",
4094 irsp->ulpCommand);
4095 break;
4096 }
4097
4098 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4099 &rspiocbq);
4100 if (unlikely(!cmdiocbq))
4101 break;
4102 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4103 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4104 if (cmdiocbq->cmd_cmpl) {
4105 spin_unlock_irqrestore(&phba->hbalock, iflag);
4106 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4107 spin_lock_irqsave(&phba->hbalock, iflag);
4108 }
4109 break;
4110 case LPFC_UNSOL_IOCB:
4111 spin_unlock_irqrestore(&phba->hbalock, iflag);
4112 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4113 spin_lock_irqsave(&phba->hbalock, iflag);
4114 break;
4115 default:
4116 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4117 char adaptermsg[LPFC_MAX_ADPTMSG];
4118 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4119 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4120 MAX_MSG_DATA);
4121 dev_warn(&((phba->pcidev)->dev),
4122 "lpfc%d: %s\n",
4123 phba->brd_no, adaptermsg);
4124 } else {
4125 /* Unknown IOCB command */
4126 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4127 "0334 Unknown IOCB command "
4128 "Data: x%x, x%x x%x x%x x%x\n",
4129 type, irsp->ulpCommand,
4130 irsp->ulpStatus,
4131 irsp->ulpIoTag,
4132 irsp->ulpContext);
4133 }
4134 break;
4135 }
4136
4137 /*
4138 * The response IOCB has been processed. Update the ring
4139 * pointer in SLIM. If the port response put pointer has not
4140 * been updated, sync the pgp->rspPutInx and fetch the new port
4141 * response put pointer.
4142 */
4143 writel(pring->sli.sli3.rspidx,
4144 &phba->host_gp[pring->ringno].rspGetInx);
4145
4146 if (pring->sli.sli3.rspidx == portRspPut)
4147 portRspPut = le32_to_cpu(pgp->rspPutInx);
4148 }
4149
4150 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4151 pring->stats.iocb_rsp_full++;
4152 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4153 writel(status, phba->CAregaddr);
4154 readl(phba->CAregaddr);
4155 }
4156 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4157 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4158 pring->stats.iocb_cmd_empty++;
4159
4160 /* Force update of the local copy of cmdGetInx */
4161 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4162 lpfc_sli_resume_iocb(phba, pring);
4163
4164		if (pring->lpfc_sli_cmd_available)
4165			pring->lpfc_sli_cmd_available(phba, pring);
4166
4167 }
4168
4169 phba->fcp_ring_in_use = 0;
4170 spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 return rc;
4172}
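
/*
 * Illustrative sketch (not compiled driver code): the response ring
 * walked above is a circular buffer shared with the port. The port
 * advances rspPutInx as it posts entries; the host consumes entries,
 * wraps its local index, and publishes the new get index back to SLIM
 * so the port can reuse the slots:
 *
 *	entry = ring[rspidx];
 *	if (++rspidx >= portRspMax)
 *		rspidx = 0;				// wrap around
 *	writel(rspidx, &host_gp[ringno].rspGetInx);	// publish to port
 */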
4173
4174/**
4175 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4176 * @phba: Pointer to HBA context object.
4177 * @pring: Pointer to driver SLI ring object.
4178 * @rspiocbp: Pointer to driver response IOCB object.
4179 *
4180 * This function is called from the worker thread when there is a slow-path
4181 * response IOCB to process. This function chains all the response iocbs until
4182 * seeing the iocb with the LE bit set. The function will call
4183 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4184 * completion of a command iocb. The function will call the
4185 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4186 * The function frees the resources or calls the completion handler if this
4187 * iocb is an abort completion. The function returns NULL when the response
4188 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4189 * this function shall chain the iocb on to the iocb_continueq and return the
4190 * response iocb passed in.
4191 **/
4192static struct lpfc_iocbq *
4193lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4194 struct lpfc_iocbq *rspiocbp)
4195{
4196 struct lpfc_iocbq *saveq;
4197 struct lpfc_iocbq *cmdiocb;
4198 struct lpfc_iocbq *next_iocb;
4199 IOCB_t *irsp;
4200 uint32_t free_saveq;
4201 u8 cmd_type;
4202 lpfc_iocb_type type;
4203 unsigned long iflag;
4204 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4205 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4206 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4207 int rc;
4208
4209 spin_lock_irqsave(&phba->hbalock, iflag);
4210	/* First add the response iocb to the continueq list */
4211 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4212 pring->iocb_continueq_cnt++;
4213
4214 /*
4215 * By default, the driver expects to free all resources
4216 * associated with this iocb completion.
4217 */
4218 free_saveq = 1;
4219 saveq = list_get_first(&pring->iocb_continueq,
4220 struct lpfc_iocbq, list);
4221 list_del_init(&pring->iocb_continueq);
4222 pring->iocb_continueq_cnt = 0;
4223
4224 pring->stats.iocb_rsp++;
4225
4226 /*
4227 * If resource errors reported from HBA, reduce
4228 * queuedepths of the SCSI device.
4229 */
4230 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4231 ((ulp_word4 & IOERR_PARAM_MASK) ==
4232 IOERR_NO_RESOURCES)) {
4233 spin_unlock_irqrestore(&phba->hbalock, iflag);
4234 phba->lpfc_rampdown_queue_depth(phba);
4235 spin_lock_irqsave(&phba->hbalock, iflag);
4236 }
4237
4238 if (ulp_status) {
4239 /* Rsp ring <ringno> error: IOCB */
4240 if (phba->sli_rev < LPFC_SLI_REV4) {
4241 irsp = &rspiocbp->iocb;
4242 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4243 "0328 Rsp Ring %d error: ulp_status x%x "
4244 "IOCB Data: "
4245 "x%08x x%08x x%08x x%08x "
4246 "x%08x x%08x x%08x x%08x "
4247 "x%08x x%08x x%08x x%08x "
4248 "x%08x x%08x x%08x x%08x\n",
4249 pring->ringno, ulp_status,
4250 get_job_ulpword(rspiocbp, 0),
4251 get_job_ulpword(rspiocbp, 1),
4252 get_job_ulpword(rspiocbp, 2),
4253 get_job_ulpword(rspiocbp, 3),
4254 get_job_ulpword(rspiocbp, 4),
4255 get_job_ulpword(rspiocbp, 5),
4256 *(((uint32_t *)irsp) + 6),
4257 *(((uint32_t *)irsp) + 7),
4258 *(((uint32_t *)irsp) + 8),
4259 *(((uint32_t *)irsp) + 9),
4260 *(((uint32_t *)irsp) + 10),
4261 *(((uint32_t *)irsp) + 11),
4262 *(((uint32_t *)irsp) + 12),
4263 *(((uint32_t *)irsp) + 13),
4264 *(((uint32_t *)irsp) + 14),
4265 *(((uint32_t *)irsp) + 15));
4266 } else {
4267 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4268 "0321 Rsp Ring %d error: "
4269 "IOCB Data: "
4270 "x%x x%x x%x x%x\n",
4271 pring->ringno,
4272 rspiocbp->wcqe_cmpl.word0,
4273 rspiocbp->wcqe_cmpl.total_data_placed,
4274 rspiocbp->wcqe_cmpl.parameter,
4275 rspiocbp->wcqe_cmpl.word3);
4276 }
4277 }
4278
4279
4280 /*
4281 * Fetch the iocb command type and call the correct completion
4282 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4283 * get freed back to the lpfc_iocb_list by the discovery
4284 * kernel thread.
4285 */
4286 cmd_type = ulp_command & CMD_IOCB_MASK;
4287 type = lpfc_sli_iocb_cmd_type(cmd_type);
4288 switch (type) {
4289 case LPFC_SOL_IOCB:
4290 spin_unlock_irqrestore(&phba->hbalock, iflag);
4291 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4292 spin_lock_irqsave(&phba->hbalock, iflag);
4293 break;
4294 case LPFC_UNSOL_IOCB:
4295 spin_unlock_irqrestore(&phba->hbalock, iflag);
4296 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4297 spin_lock_irqsave(&phba->hbalock, iflag);
4298 if (!rc)
4299 free_saveq = 0;
4300 break;
4301 case LPFC_ABORT_IOCB:
4302 cmdiocb = NULL;
4303 if (ulp_command != CMD_XRI_ABORTED_CX)
4304 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4305 saveq);
4306 if (cmdiocb) {
4307 /* Call the specified completion routine */
4308 if (cmdiocb->cmd_cmpl) {
4309 spin_unlock_irqrestore(&phba->hbalock, iflag);
4310 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4311 spin_lock_irqsave(&phba->hbalock, iflag);
4312 } else {
4313 __lpfc_sli_release_iocbq(phba, cmdiocb);
4314 }
4315 }
4316 break;
4317 case LPFC_UNKNOWN_IOCB:
4318 if (ulp_command == CMD_ADAPTER_MSG) {
4319 char adaptermsg[LPFC_MAX_ADPTMSG];
4320
4321 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4322 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4323 MAX_MSG_DATA);
4324 dev_warn(&((phba->pcidev)->dev),
4325 "lpfc%d: %s\n",
4326 phba->brd_no, adaptermsg);
4327 } else {
4328 /* Unknown command */
4329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4330 "0335 Unknown IOCB "
4331 "command Data: x%x "
4332 "x%x x%x x%x\n",
4333 ulp_command,
4334 ulp_status,
4335 get_wqe_reqtag(rspiocbp),
4336 get_job_ulpcontext(phba, rspiocbp));
4337 }
4338 break;
4339 }
4340
4341 if (free_saveq) {
4342 list_for_each_entry_safe(rspiocbp, next_iocb,
4343 &saveq->list, list) {
4344 list_del_init(&rspiocbp->list);
4345 __lpfc_sli_release_iocbq(phba, rspiocbp);
4346 }
4347 __lpfc_sli_release_iocbq(phba, saveq);
4348 }
4349 rspiocbp = NULL;
4350 spin_unlock_irqrestore(&phba->hbalock, iflag);
4351 return rspiocbp;
4352}
4353
4354/**
4355 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4356 * @phba: Pointer to HBA context object.
4357 * @pring: Pointer to driver SLI ring object.
4358 * @mask: Host attention register mask for this ring.
4359 *
4360 * This routine wraps the actual slow_ring event process routine from the
4361 * API jump table function pointer from the lpfc_hba struct.
4362 **/
4363void
4364lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4365 struct lpfc_sli_ring *pring, uint32_t mask)
4366{
4367 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4368}
4369
4370/**
4371 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4372 * @phba: Pointer to HBA context object.
4373 * @pring: Pointer to driver SLI ring object.
4374 * @mask: Host attention register mask for this ring.
4375 *
4376 * This function is called from the worker thread when there is a ring event
4377 * for non-fcp rings. The caller does not hold any lock. The function will
4378 * for non-fcp rings. The caller does not hold any lock. The function
4379 * removes each response iocb in the response ring and calls the handle
4380 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4381static void
4382lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4383 struct lpfc_sli_ring *pring, uint32_t mask)
4384{
4385 struct lpfc_pgp *pgp;
4386 IOCB_t *entry;
4387 IOCB_t *irsp = NULL;
4388 struct lpfc_iocbq *rspiocbp = NULL;
4389 uint32_t portRspPut, portRspMax;
4390 unsigned long iflag;
4391 uint32_t status;
4392
4393 pgp = &phba->port_gp[pring->ringno];
4394 spin_lock_irqsave(&phba->hbalock, iflag);
4395 pring->stats.iocb_event++;
4396
4397 /*
4398 * The next available response entry should never exceed the maximum
4399 * entries. If it does, treat it as an adapter hardware error.
4400 */
4401 portRspMax = pring->sli.sli3.numRiocb;
4402 portRspPut = le32_to_cpu(pgp->rspPutInx);
4403 if (portRspPut >= portRspMax) {
4404 /*
4405 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4406 * rsp ring <portRspMax>
4407 */
4408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4409 "0303 Ring %d handler: portRspPut %d "
4410 "is bigger than rsp ring %d\n",
4411 pring->ringno, portRspPut, portRspMax);
4412
4413 phba->link_state = LPFC_HBA_ERROR;
4414 spin_unlock_irqrestore(&phba->hbalock, iflag);
4415
4416 phba->work_hs = HS_FFER3;
4417 lpfc_handle_eratt(phba);
4418
4419 return;
4420 }
4421
4422 rmb();
4423 while (pring->sli.sli3.rspidx != portRspPut) {
4424 /*
4425 * Build a completion list and call the appropriate handler.
4426 * The process is to get the next available response iocb, get
4427 * a free iocb from the list, copy the response data into the
4428 * free iocb, insert to the continuation list, and update the
4429 * next response index to slim. This process makes response
4430		 * iocbs in the ring available to DMA as fast as possible but
4431		 * pays a penalty for a copy operation. Since the iocb is
4432		 * only 32 bytes, this penalty is considered small relative to
4433		 * the PCI reads for register values and a slim write. When
4434		 * the ulpLe field is set, the entire command has been
4435		 * received.
4436 */
4437 entry = lpfc_resp_iocb(phba, pring);
4438
4439 phba->last_completion_time = jiffies;
4440 rspiocbp = __lpfc_sli_get_iocbq(phba);
4441 if (rspiocbp == NULL) {
4442 printk(KERN_ERR "%s: out of buffers! Failing "
4443 "completion.\n", __func__);
4444 break;
4445 }
4446
4447 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4448 phba->iocb_rsp_size);
4449 irsp = &rspiocbp->iocb;
4450
4451 if (++pring->sli.sli3.rspidx >= portRspMax)
4452 pring->sli.sli3.rspidx = 0;
4453
4454 if (pring->ringno == LPFC_ELS_RING) {
4455 lpfc_debugfs_slow_ring_trc(phba,
4456 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4457 *(((uint32_t *) irsp) + 4),
4458 *(((uint32_t *) irsp) + 6),
4459 *(((uint32_t *) irsp) + 7));
4460 }
4461
4462 writel(pring->sli.sli3.rspidx,
4463 &phba->host_gp[pring->ringno].rspGetInx);
4464
4465 spin_unlock_irqrestore(&phba->hbalock, iflag);
4466 /* Handle the response IOCB */
4467 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4468 spin_lock_irqsave(&phba->hbalock, iflag);
4469
4470 /*
4471 * If the port response put pointer has not been updated, sync
4472		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4473 * response put pointer.
4474 */
4475 if (pring->sli.sli3.rspidx == portRspPut) {
4476 portRspPut = le32_to_cpu(pgp->rspPutInx);
4477 }
4478 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4479
4480 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4481 /* At least one response entry has been freed */
4482 pring->stats.iocb_rsp_full++;
4483 /* SET RxRE_RSP in Chip Att register */
4484 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4485 writel(status, phba->CAregaddr);
4486 readl(phba->CAregaddr); /* flush */
4487 }
4488 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4489 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4490 pring->stats.iocb_cmd_empty++;
4491
4492 /* Force update of the local copy of cmdGetInx */
4493 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4494 lpfc_sli_resume_iocb(phba, pring);
4495
4496		if (pring->lpfc_sli_cmd_available)
4497			pring->lpfc_sli_cmd_available(phba, pring);
4498
4499 }
4500
4501 spin_unlock_irqrestore(&phba->hbalock, iflag);
4502 return;
4503}
4504
4505/**
4506 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4507 * @phba: Pointer to HBA context object.
4508 * @pring: Pointer to driver SLI ring object.
4509 * @mask: Host attention register mask for this ring.
4510 *
4511 * This function is called from the worker thread when there is a pending
4512 * ELS response iocb on the driver internal slow-path response iocb worker
4513 * queue. The caller does not hold any lock. The function removes each
4514 * response iocb from the response worker queue and calls the handle
4515 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4516 **/
4517static void
4518lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4519 struct lpfc_sli_ring *pring, uint32_t mask)
4520{
4521 struct lpfc_iocbq *irspiocbq;
4522 struct hbq_dmabuf *dmabuf;
4523 struct lpfc_cq_event *cq_event;
4524 unsigned long iflag;
4525 int count = 0;
4526
4527 clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
4528 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4529 /* Get the response iocb from the head of work queue */
4530 spin_lock_irqsave(&phba->hbalock, iflag);
4531 list_remove_head(&phba->sli4_hba.sp_queue_event,
4532 cq_event, struct lpfc_cq_event, list);
4533 spin_unlock_irqrestore(&phba->hbalock, iflag);
4534
4535 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4536 case CQE_CODE_COMPL_WQE:
4537 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4538 cq_event);
4539 /* Translate ELS WCQE to response IOCBQ */
4540 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4541 irspiocbq);
4542 if (irspiocbq)
4543 lpfc_sli_sp_handle_rspiocb(phba, pring,
4544 irspiocbq);
4545 count++;
4546 break;
4547 case CQE_CODE_RECEIVE:
4548 case CQE_CODE_RECEIVE_V1:
4549 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4550 cq_event);
4551 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4552 count++;
4553 break;
4554 default:
4555 break;
4556 }
4557
4558 /* Limit the number of events to 64 to avoid soft lockups */
4559 if (count == 64)
4560 break;
4561 }
4562}
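
/*
 * Illustrative sketch (not compiled driver code): the cq_event member
 * consumed above is embedded in both struct lpfc_iocbq and struct
 * hbq_dmabuf, so the handler recovers the enclosing object with
 * container_of(), selecting the type by the WCQE completion code:
 *
 *	struct lpfc_iocbq *iocbq =
 *		container_of(cq_event, struct lpfc_iocbq, cq_event);
 */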
4563
4564/**
4565 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4566 * @phba: Pointer to HBA context object.
4567 * @pring: Pointer to driver SLI ring object.
4568 *
4569 * This function aborts all iocbs in the given ring and frees all the iocb
4570 * objects in txq. This function issues an abort iocb for all the iocb commands
4571 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4572 * the return of this function. The caller is not required to hold any locks.
4573 **/
4574void
4575lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4576{
4577 LIST_HEAD(tx_completions);
4578 LIST_HEAD(txcmplq_completions);
4579 struct lpfc_iocbq *iocb, *next_iocb;
4580 int offline;
4581
4582 if (pring->ringno == LPFC_ELS_RING) {
4583 lpfc_fabric_abort_hba(phba);
4584 }
4585 offline = pci_channel_offline(phba->pcidev);
4586
4587 /* Error everything on txq and txcmplq
4588 * First do the txq.
4589 */
4590 if (phba->sli_rev >= LPFC_SLI_REV4) {
4591 spin_lock_irq(&pring->ring_lock);
4592 list_splice_init(&pring->txq, &tx_completions);
4593 pring->txq_cnt = 0;
4594
4595 if (offline) {
4596 list_splice_init(&pring->txcmplq,
4597 &txcmplq_completions);
4598 } else {
4599 /* Next issue ABTS for everything on the txcmplq */
4600 list_for_each_entry_safe(iocb, next_iocb,
4601 &pring->txcmplq, list)
4602 lpfc_sli_issue_abort_iotag(phba, pring,
4603 iocb, NULL);
4604 }
4605 spin_unlock_irq(&pring->ring_lock);
4606 } else {
4607 spin_lock_irq(&phba->hbalock);
4608 list_splice_init(&pring->txq, &tx_completions);
4609 pring->txq_cnt = 0;
4610
4611 if (offline) {
4612 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4613 } else {
4614 /* Next issue ABTS for everything on the txcmplq */
4615 list_for_each_entry_safe(iocb, next_iocb,
4616 &pring->txcmplq, list)
4617 lpfc_sli_issue_abort_iotag(phba, pring,
4618 iocb, NULL);
4619 }
4620 spin_unlock_irq(&phba->hbalock);
4621 }
4622
4623 if (offline) {
4624 /* Cancel all the IOCBs from the completions list */
4625 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4626 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4627 } else {
4628 /* Make sure HBA is alive */
4629 lpfc_issue_hb_tmo(phba);
4630 }
4631 /* Cancel all the IOCBs from the completions list */
4632 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4633 IOERR_SLI_ABORTED);
4634}
4635
4636/**
4637 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4638 * @phba: Pointer to HBA context object.
4639 *
4640 * This function aborts all iocbs in FCP rings and frees all the iocb
4641 * objects in txq. This function issues an abort iocb for all the iocb commands
4642 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4643 * the return of this function. The caller is not required to hold any locks.
4644 **/
4645void
4646lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4647{
4648 struct lpfc_sli *psli = &phba->sli;
4649 struct lpfc_sli_ring *pring;
4650 uint32_t i;
4651
4652 /* Look on all the FCP Rings for the iotag */
4653 if (phba->sli_rev >= LPFC_SLI_REV4) {
4654 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4655 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656 lpfc_sli_abort_iocb_ring(phba, pring);
4657 }
4658 } else {
4659 pring = &psli->sli3_ring[LPFC_FCP_RING];
4660 lpfc_sli_abort_iocb_ring(phba, pring);
4661 }
4662}
4663
4664/**
4665 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4666 * @phba: Pointer to HBA context object.
4667 *
4668 * This function flushes all iocbs in the IO ring and frees all the iocb
4669 * objects in txq and txcmplq. This function will not issue abort iocbs
4670 * for the iocb commands in txcmplq; they will just be returned with
4671 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4672 * slot has been permanently disabled.
4673 **/
4674void
4675lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4676{
4677 LIST_HEAD(txq);
4678 LIST_HEAD(txcmplq);
4679 struct lpfc_sli *psli = &phba->sli;
4680 struct lpfc_sli_ring *pring;
4681 uint32_t i;
4682 struct lpfc_iocbq *piocb, *next_iocb;
4683
4684 /* Indicate the I/O queues are flushed */
4685 set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
4686
4687 /* Look on all the FCP Rings for the iotag */
4688 if (phba->sli_rev >= LPFC_SLI_REV4) {
4689 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4690 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4691
4692 spin_lock_irq(&pring->ring_lock);
4693 /* Retrieve everything on txq */
4694 list_splice_init(&pring->txq, &txq);
4695 list_for_each_entry_safe(piocb, next_iocb,
4696 &pring->txcmplq, list)
4697 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4698 /* Retrieve everything on the txcmplq */
4699 list_splice_init(&pring->txcmplq, &txcmplq);
4700 pring->txq_cnt = 0;
4701 pring->txcmplq_cnt = 0;
4702 spin_unlock_irq(&pring->ring_lock);
4703
4704 /* Flush the txq */
4705 lpfc_sli_cancel_iocbs(phba, &txq,
4706 IOSTAT_LOCAL_REJECT,
4707 IOERR_SLI_DOWN);
4708 /* Flush the txcmplq */
4709 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4710 IOSTAT_LOCAL_REJECT,
4711 IOERR_SLI_DOWN);
4712 if (unlikely(pci_channel_offline(phba->pcidev)))
4713 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4714 }
4715 } else {
4716 pring = &psli->sli3_ring[LPFC_FCP_RING];
4717
4718 spin_lock_irq(&phba->hbalock);
4719 /* Retrieve everything on txq */
4720 list_splice_init(&pring->txq, &txq);
4721 list_for_each_entry_safe(piocb, next_iocb,
4722 &pring->txcmplq, list)
4723 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4724 /* Retrieve everything on the txcmplq */
4725 list_splice_init(&pring->txcmplq, &txcmplq);
4726 pring->txq_cnt = 0;
4727 pring->txcmplq_cnt = 0;
4728 spin_unlock_irq(&phba->hbalock);
4729
4730 /* Flush the txq */
4731 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4732 IOERR_SLI_DOWN);
4733		/* Flush the txcmplq */
4734 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4735 IOERR_SLI_DOWN);
4736 }
4737}
4738
4739/**
4740 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4741 * @phba: Pointer to HBA context object.
4742 * @mask: Bit mask to be checked.
4743 *
4744 * This function reads the host status register and compares it
4745 * with the provided bit mask to check if the HBA completed
4746 * the restart. This function will wait in a loop for the
4747 * HBA to complete the restart. If the HBA does not restart within
4748 * 15 iterations, the function will reset the HBA again. The
4749 * function returns 1 when the HBA fails to restart; otherwise it
4750 * returns zero.
4751 **/
4752static int
4753lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4754{
4755 uint32_t status;
4756 int i = 0;
4757 int retval = 0;
4758
4759 /* Read the HBA Host Status Register */
4760 if (lpfc_readl(phba->HSregaddr, &status))
4761 return 1;
4762
4763 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
4764
4765 /*
4766	 * Check the status register every 10ms for 5 retries, then every
4767	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4768	 * check every 2.5 sec for 4 more retries.
4769	 * Break out of the loop if errors occurred during init.
4770 */
4771 while (((status & mask) != mask) &&
4772 !(status & HS_FFERM) &&
4773 i++ < 20) {
4774
4775 if (i <= 5)
4776 msleep(10);
4777 else if (i <= 10)
4778 msleep(500);
4779 else
4780 msleep(2500);
4781
4782 if (i == 15) {
4783 /* Do post */
4784 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4785 lpfc_sli_brdrestart(phba);
4786 }
4787 /* Read the HBA Host Status Register */
4788 if (lpfc_readl(phba->HSregaddr, &status)) {
4789 retval = 1;
4790 break;
4791 }
4792 }
4793
4794 /* Check to see if any errors occurred during init */
4795 if ((status & HS_FFERM) || (i >= 20)) {
4796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4797 "2751 Adapter failed to restart, "
4798 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4799 status,
4800 readl(phba->MBslimaddr + 0xa8),
4801 readl(phba->MBslimaddr + 0xac));
4802 phba->link_state = LPFC_HBA_ERROR;
4803 retval = 1;
4804 }
4805
4806 return retval;
4807}
4808
4809/**
4810 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4811 * @phba: Pointer to HBA context object.
4812 * @mask: Bit mask to be checked.
4813 *
4814 * This function checks the host status register to see if the HBA is
4815 * ready. This function will wait in a loop for the HBA to become ready.
4816 * If the HBA is not ready, the function will reset the HBA PCI
4817 * function again. The function returns 1 when the HBA fails to become
4818 * ready; otherwise it returns zero.
4819 **/
4820static int
4821lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4822{
4823 uint32_t status;
4824 int retval = 0;
4825
4826 /* Read the HBA Host Status Register */
4827 status = lpfc_sli4_post_status_check(phba);
4828
4829 if (status) {
4830 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4831 lpfc_sli_brdrestart(phba);
4832 status = lpfc_sli4_post_status_check(phba);
4833 }
4834
4835 /* Check to see if any errors occurred during init */
4836 if (status) {
4837 phba->link_state = LPFC_HBA_ERROR;
4838 retval = 1;
4839 } else
4840 phba->sli4_hba.intr_enable = 0;
4841
4842 clear_bit(HBA_SETUP, &phba->hba_flag);
4843 return retval;
4844}
4845
4846/**
4847 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4848 * @phba: Pointer to HBA context object.
4849 * @mask: Bit mask to be checked.
4850 *
4851 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4852 * from the API jump table function pointer from the lpfc_hba struct.
4853 **/
4854int
4855lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4856{
4857 return phba->lpfc_sli_brdready(phba, mask);
4858}
4859
4860#define BARRIER_TEST_PATTERN (0xdeadbeef)
4861
4862/**
4863 * lpfc_reset_barrier - Make HBA ready for HBA reset
4864 * @phba: Pointer to HBA context object.
4865 *
4866 * This function is called before resetting an HBA. It is called with the
4867 * hbalock held and requests that the HBA quiesce DMAs before a reset.
4868 **/
4869void lpfc_reset_barrier(struct lpfc_hba *phba)
4870{
4871 uint32_t __iomem *resp_buf;
4872 uint32_t __iomem *mbox_buf;
4873 volatile struct MAILBOX_word0 mbox;
4874 uint32_t hc_copy, ha_copy, resp_data;
4875 int i;
4876 uint8_t hdrtype;
4877
4878 lockdep_assert_held(&phba->hbalock);
4879
4880 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4881 if (hdrtype != PCI_HEADER_TYPE_MFD ||
4882 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4883 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4884 return;
4885
4886 /*
4887	 * Tell the other part of the chip to temporarily suspend all
4888	 * its DMA activity.
4889 */
4890 resp_buf = phba->MBslimaddr;
4891
4892 /* Disable the error attention */
4893 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4894 return;
4895 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4896 readl(phba->HCregaddr); /* flush */
4897 phba->link_flag |= LS_IGNORE_ERATT;
4898
4899 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4900 return;
4901 if (ha_copy & HA_ERATT) {
4902 /* Clear Chip error bit */
4903 writel(HA_ERATT, phba->HAregaddr);
4904 phba->pport->stopped = 1;
4905 }
4906
4907 mbox.word0 = 0;
4908 mbox.mbxCommand = MBX_KILL_BOARD;
4909 mbox.mbxOwner = OWN_CHIP;
4910
4911 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4912 mbox_buf = phba->MBslimaddr;
4913 writel(mbox.word0, mbox_buf);
4914
4915 for (i = 0; i < 50; i++) {
4916 if (lpfc_readl((resp_buf + 1), &resp_data))
4917 return;
4918 if (resp_data != ~(BARRIER_TEST_PATTERN))
4919 mdelay(1);
4920 else
4921 break;
4922 }
4923 resp_data = 0;
4924 if (lpfc_readl((resp_buf + 1), &resp_data))
4925 return;
4926 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4927 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4928 phba->pport->stopped)
4929 goto restore_hc;
4930 else
4931 goto clear_errat;
4932 }
4933
4934 mbox.mbxOwner = OWN_HOST;
4935 resp_data = 0;
4936 for (i = 0; i < 500; i++) {
4937 if (lpfc_readl(resp_buf, &resp_data))
4938 return;
4939 if (resp_data != mbox.word0)
4940 mdelay(1);
4941 else
4942 break;
4943 }
4944
4945clear_errat:
4946
4947 while (++i < 500) {
4948 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4949 return;
4950 if (!(ha_copy & HA_ERATT))
4951 mdelay(1);
4952 else
4953 break;
4954 }
4955
4956 if (readl(phba->HAregaddr) & HA_ERATT) {
4957 writel(HA_ERATT, phba->HAregaddr);
4958 phba->pport->stopped = 1;
4959 }
4960
4961restore_hc:
4962 phba->link_flag &= ~LS_IGNORE_ERATT;
4963 writel(hc_copy, phba->HCregaddr);
4964 readl(phba->HCregaddr); /* flush */
4965}
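
/*
 * Summary of the barrier handshake above, with an illustrative poll loop
 * (not compiled driver code): error attention is masked and any pending
 * ERATT noted; BARRIER_TEST_PATTERN is written into the second response
 * word and a chip-owned MBX_KILL_BOARD word0 is posted; the code then
 * polls until the pattern's complement appears, which is taken to mean
 * the other side of the chip has quiesced DMA; finally the saved HC
 * register value is restored:
 *
 *	for (i = 0; i < 50; i++) {
 *		if (readl(resp_buf + 1) == ~BARRIER_TEST_PATTERN)
 *			break;		// peer acknowledged
 *		mdelay(1);
 *	}
 */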
4966
4967/**
4968 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4969 * @phba: Pointer to HBA context object.
4970 *
4971 * This function issues a kill_board mailbox command and waits for
4972 * the error attention interrupt. This function is called for stopping
4973 * the firmware processing. The caller is not required to hold any
4974 * locks. This function calls lpfc_hba_down_post function to free
4975 * any pending commands after the kill. The function will return 1 when it
4976 * fails to kill the board else will return 0.
4977 **/
4978int
4979lpfc_sli_brdkill(struct lpfc_hba *phba)
4980{
4981 struct lpfc_sli *psli;
4982 LPFC_MBOXQ_t *pmb;
4983 uint32_t status;
4984 uint32_t ha_copy;
4985 int retval;
4986 int i = 0;
4987
4988 psli = &phba->sli;
4989
4990 /* Kill HBA */
4991 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4992 "0329 Kill HBA Data: x%x x%x\n",
4993 phba->pport->port_state, psli->sli_flag);
4994
4995 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4996 if (!pmb)
4997 return 1;
4998
4999 /* Disable the error attention */
5000 spin_lock_irq(&phba->hbalock);
5001 if (lpfc_readl(phba->HCregaddr, &status)) {
5002 spin_unlock_irq(&phba->hbalock);
5003 mempool_free(pmb, phba->mbox_mem_pool);
5004 return 1;
5005 }
5006 status &= ~HC_ERINT_ENA;
5007 writel(status, phba->HCregaddr);
5008 readl(phba->HCregaddr); /* flush */
5009 phba->link_flag |= LS_IGNORE_ERATT;
5010 spin_unlock_irq(&phba->hbalock);
5011
5012 lpfc_kill_board(phba, pmb);
5013 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5014 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5015
5016 if (retval != MBX_SUCCESS) {
5017 if (retval != MBX_BUSY)
5018 mempool_free(pmb, phba->mbox_mem_pool);
5019 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5020 "2752 KILL_BOARD command failed retval %d\n",
5021 retval);
5022 spin_lock_irq(&phba->hbalock);
5023 phba->link_flag &= ~LS_IGNORE_ERATT;
5024 spin_unlock_irq(&phba->hbalock);
5025 return 1;
5026 }
5027
5028 spin_lock_irq(&phba->hbalock);
5029 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5030 spin_unlock_irq(&phba->hbalock);
5031
5032 mempool_free(pmb, phba->mbox_mem_pool);
5033
5034 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5035 * attention every 100ms for 3 seconds. If we don't get ERATT after
5036 * 3 seconds we still set HBA_ERROR state because the status of the
5037 * board is now undefined.
5038 */
5039 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5040 return 1;
5041 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5042 mdelay(100);
5043 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5044 return 1;
5045 }
5046
5047 del_timer_sync(&psli->mbox_tmo);
5048 if (ha_copy & HA_ERATT) {
5049 writel(HA_ERATT, phba->HAregaddr);
5050 phba->pport->stopped = 1;
5051 }
5052 spin_lock_irq(&phba->hbalock);
5053 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5054 psli->mbox_active = NULL;
5055 phba->link_flag &= ~LS_IGNORE_ERATT;
5056 spin_unlock_irq(&phba->hbalock);
5057
5058 lpfc_hba_down_post(phba);
5059 phba->link_state = LPFC_HBA_ERROR;
5060
5061 return ha_copy & HA_ERATT ? 0 : 1;
5062}
5063
5064/**
5065 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5066 * @phba: Pointer to HBA context object.
5067 *
5068 * This function resets the HBA by writing HC_INITFF to the control
5069 * register. After the HBA resets, this function resets all the iocb ring
5070 * indices. This function disables PCI layer parity checking during
5071 * the reset.
5072 * This function returns 0 always.
5073 * The caller is not required to hold any locks.
5074 **/
5075int
5076lpfc_sli_brdreset(struct lpfc_hba *phba)
5077{
5078 struct lpfc_sli *psli;
5079 struct lpfc_sli_ring *pring;
5080 uint16_t cfg_value;
5081 int i;
5082
5083 psli = &phba->sli;
5084
5085 /* Reset HBA */
5086 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5087 "0325 Reset HBA Data: x%x x%x\n",
5088 (phba->pport) ? phba->pport->port_state : 0,
5089 psli->sli_flag);
5090
5091 /* perform board reset */
5092 phba->fc_eventTag = 0;
5093 phba->link_events = 0;
5094 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5095 if (phba->pport) {
5096 phba->pport->fc_myDID = 0;
5097 phba->pport->fc_prevDID = 0;
5098 }
5099
5100 /* Turn off parity checking and serr during the physical reset */
5101 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5102 return -EIO;
5103
5104 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5105 (cfg_value &
5106 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5107
5108 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5109
5110 /* Now toggle INITFF bit in the Host Control Register */
5111 writel(HC_INITFF, phba->HCregaddr);
5112 mdelay(1);
5113 readl(phba->HCregaddr); /* flush */
5114 writel(0, phba->HCregaddr);
5115 readl(phba->HCregaddr); /* flush */
5116
5117 /* Restore PCI cmd register */
5118 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5119
5120 /* Initialize relevant SLI info */
5121 for (i = 0; i < psli->num_rings; i++) {
5122 pring = &psli->sli3_ring[i];
5123 pring->flag = 0;
5124 pring->sli.sli3.rspidx = 0;
5125 pring->sli.sli3.next_cmdidx = 0;
5126 pring->sli.sli3.local_getidx = 0;
5127 pring->sli.sli3.cmdidx = 0;
5128 pring->missbufcnt = 0;
5129 }
5130
5131 phba->link_state = LPFC_WARM_START;
5132 return 0;
5133}
5134
5135/**
5136 * lpfc_sli4_brdreset - Reset a sli-4 HBA
5137 * @phba: Pointer to HBA context object.
5138 *
5139 * This function resets a SLI4 HBA. This function disables PCI layer parity
5140 * checking while it resets the device. The caller is not required to hold
5141 * any locks.
5142 *
5143 * This function returns 0 on success else returns negative error code.
5144 **/
5145int
5146lpfc_sli4_brdreset(struct lpfc_hba *phba)
5147{
5148 struct lpfc_sli *psli = &phba->sli;
5149 uint16_t cfg_value;
5150 int rc = 0;
5151
5152 /* Reset HBA */
5153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5154 "0295 Reset HBA Data: x%x x%x x%lx\n",
5155 phba->pport->port_state, psli->sli_flag,
5156 phba->hba_flag);
5157
5158 /* perform board reset */
5159 phba->fc_eventTag = 0;
5160 phba->link_events = 0;
5161 phba->pport->fc_myDID = 0;
5162 phba->pport->fc_prevDID = 0;
5163 clear_bit(HBA_SETUP, &phba->hba_flag);
5164
5165 spin_lock_irq(&phba->hbalock);
5166 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5167 phba->fcf.fcf_flag = 0;
5168 spin_unlock_irq(&phba->hbalock);
5169
5170 /* Now physically reset the device */
5171 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5172 "0389 Performing PCI function reset!\n");
5173
5174 /* Turn off parity checking and serr during the physical reset */
5175 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5176 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5177 "3205 PCI read Config failed\n");
5178 return -EIO;
5179 }
5180
5181 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5182 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5183
5184 /* Perform FCoE PCI function reset before freeing queue memory */
5185 rc = lpfc_pci_function_reset(phba);
5186
5187 /* Restore PCI cmd register */
5188 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5189
5190 return rc;
5191}
5192
5193/**
5194 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5195 * @phba: Pointer to HBA context object.
5196 *
5197 * This function is called in the SLI initialization code path to
5198 * restart the HBA. The caller is not required to hold any lock.
5199 * This function writes MBX_RESTART mailbox command to the SLIM and
5200 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5201 * function to free any pending commands. The function enables
5202 * POST only during the first initialization. The function returns zero
5203 * and does not guarantee that the MBX_RESTART mailbox command has
5204 * completed before it returns.
5205 **/
5206static int
5207lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5208{
5209 volatile struct MAILBOX_word0 mb;
5210 struct lpfc_sli *psli;
5211 void __iomem *to_slim;
5212
5213 spin_lock_irq(&phba->hbalock);
5214
5215 psli = &phba->sli;
5216
5217 /* Restart HBA */
5218 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5219 "0337 Restart HBA Data: x%x x%x\n",
5220 (phba->pport) ? phba->pport->port_state : 0,
5221 psli->sli_flag);
5222
5223 mb.word0 = 0;
5224 mb.mbxCommand = MBX_RESTART;
5225 mb.mbxHc = 1;
5226
5227 lpfc_reset_barrier(phba);
5228
5229 to_slim = phba->MBslimaddr;
5230 writel(mb.word0, to_slim);
5231 readl(to_slim); /* flush */
5232
5233 /* Only skip post after fc_ffinit is completed */
5234 if (phba->pport && phba->pport->port_state)
5235 mb.word0 = 1; /* This is really setting up word1 */
5236 else
5237 mb.word0 = 0; /* This is really setting up word1 */
5238 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5239 writel(mb.word0, to_slim);
5240 readl(to_slim); /* flush */
5241
5242 lpfc_sli_brdreset(phba);
5243 if (phba->pport)
5244 phba->pport->stopped = 0;
5245 phba->link_state = LPFC_INIT_START;
5246 phba->hba_flag = 0;
5247 spin_unlock_irq(&phba->hbalock);
5248
5249 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5250 psli->stats_start = ktime_get_seconds();
5251
5252 /* Give the INITFF and Post time to settle. */
5253 mdelay(100);
5254
5255 lpfc_hba_down_post(phba);
5256
5257 return 0;
5258}
5259
5260/**
5261 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5262 * @phba: Pointer to HBA context object.
5263 *
5264 * This function is called in the SLI initialization code path to restart
5265 * a SLI4 HBA. The caller is not required to hold any lock.
5266 * At the end of the function, it calls lpfc_hba_down_post function to
5267 * free any pending commands.
5268 **/
5269static int
5270lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5271{
5272 struct lpfc_sli *psli = &phba->sli;
5273 int rc;
5274
5275 /* Restart HBA */
5276 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5277 "0296 Restart HBA Data: x%x x%x\n",
5278 phba->pport->port_state, psli->sli_flag);
5279
5280 rc = lpfc_sli4_brdreset(phba);
5281 if (rc) {
5282 phba->link_state = LPFC_HBA_ERROR;
5283 goto hba_down_queue;
5284 }
5285
5286 spin_lock_irq(&phba->hbalock);
5287 phba->pport->stopped = 0;
5288 phba->link_state = LPFC_INIT_START;
5289 phba->hba_flag = 0;
5290 /* Preserve FA-PWWN expectation */
5291 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5292 spin_unlock_irq(&phba->hbalock);
5293
5294 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5295 psli->stats_start = ktime_get_seconds();
5296
5297hba_down_queue:
5298 lpfc_hba_down_post(phba);
5299 lpfc_sli4_queue_destroy(phba);
5300
5301 return rc;
5302}
5303
5304/**
5305 * lpfc_sli_brdrestart - Wrapper func for restarting hba
5306 * @phba: Pointer to HBA context object.
5307 *
5308 * This routine wraps the actual SLI3 or SLI4 HBA restart routine, invoking
5309 * it through the API jump table function pointer in the lpfc_hba struct.
5310 **/
5311int
5312lpfc_sli_brdrestart(struct lpfc_hba *phba)
5313{
5314 return phba->lpfc_sli_brdrestart(phba);
5315}
5316
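/*
 * The jump table slot used above is populated during driver API table
 * setup. A minimal sketch of the dispatch pattern (illustrative only;
 * the actual assignment lives in the API table setup code):
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *	else
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 */
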
5317/**
5318 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5319 * @phba: Pointer to HBA context object.
5320 *
5321 * This function is called after a HBA restart to wait for successful
5322 * restart of the HBA. Successful restart of the HBA is indicated by
5323 * HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after 150
5324 * polling iterations (~60 seconds), the function restarts the HBA once more.
5325 * Returns zero if the HBA restarts successfully, else a negative error code.
5326 **/
5327int
5328lpfc_sli_chipset_init(struct lpfc_hba *phba)
5329{
5330 uint32_t status, i = 0;
5331
5332 /* Read the HBA Host Status Register */
5333 if (lpfc_readl(phba->HSregaddr, &status))
5334 return -EIO;
5335
5336 /* Check status register to see what current state is */
5337 i = 0;
5338 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5339
5340 /* Check every 10ms for 10 retries, then every 100ms for 90
5341 * retries, then every 1 sec for 50 retries, for a total of
5342 * ~60 seconds before resetting the board again, after which
5343 * we check every 1 sec for another 50 retries. The up-to-60-
5344 * second wait is required for Falcon FIPS zeroization to
5345 * complete; any board reset in between restarts zeroization,
5346 * further delaying board readiness.
5347 */
5348 if (i++ >= 200) {
5349 /* Adapter failed to init, timeout, status reg
5350 <status> */
5351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5352 "0436 Adapter failed to init, "
5353 "timeout, status reg x%x, "
5354 "FW Data: A8 x%x AC x%x\n", status,
5355 readl(phba->MBslimaddr + 0xa8),
5356 readl(phba->MBslimaddr + 0xac));
5357 phba->link_state = LPFC_HBA_ERROR;
5358 return -ETIMEDOUT;
5359 }
5360
5361 /* Check to see if any errors occurred during init */
5362 if (status & HS_FFERM) {
5363 /* ERROR: During chipset initialization */
5364 /* Adapter failed to init, chipset, status reg
5365 <status> */
5366 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5367 "0437 Adapter failed to init, "
5368 "chipset, status reg x%x, "
5369 "FW Data: A8 x%x AC x%x\n", status,
5370 readl(phba->MBslimaddr + 0xa8),
5371 readl(phba->MBslimaddr + 0xac));
5372 phba->link_state = LPFC_HBA_ERROR;
5373 return -EIO;
5374 }
5375
5376 if (i <= 10)
5377 msleep(10);
5378 else if (i <= 100)
5379 msleep(100);
5380 else
5381 msleep(1000);
5382
5383 if (i == 150) {
5384 /* Do post */
5385 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5386 lpfc_sli_brdrestart(phba);
5387 }
5388 /* Read the HBA Host Status Register */
5389 if (lpfc_readl(phba->HSregaddr, &status))
5390 return -EIO;
5391 }
5392
5393 /* Check to see if any errors occurred during init */
5394 if (status & HS_FFERM) {
5395 /* ERROR: During chipset initialization */
5396 /* Adapter failed to init, chipset, status reg <status> */
5397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5398 "0438 Adapter failed to init, chipset, "
5399 "status reg x%x, "
5400 "FW Data: A8 x%x AC x%x\n", status,
5401 readl(phba->MBslimaddr + 0xa8),
5402 readl(phba->MBslimaddr + 0xac));
5403 phba->link_state = LPFC_HBA_ERROR;
5404 return -EIO;
5405 }
5406
5407 set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5408
5409 /* Clear all interrupt enable conditions */
5410 writel(0, phba->HCregaddr);
5411 readl(phba->HCregaddr); /* flush */
5412
5413 /* setup host attn register */
5414 writel(0xffffffff, phba->HAregaddr);
5415 readl(phba->HAregaddr); /* flush */
5416 return 0;
5417}
5418
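/*
 * Approximate worked timing for the polling schedule implemented above
 * (derived directly from the loop; values are approximate):
 *
 *	iterations   1..10  @ 10ms  ->  ~0.1s
 *	iterations  11..100 @ 100ms ->  ~9s
 *	iterations 101..150 @ 1s    ->  ~50s  (~59s total, then one restart)
 *	iterations 151..200 @ 1s    ->  ~50s  (then -ETIMEDOUT)
 */
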
5419/**
5420 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5421 *
5422 * This function calculates and returns the number of HBQs required to be
5423 * configured.
5424 **/
5425int
5426lpfc_sli_hbq_count(void)
5427{
5428 return ARRAY_SIZE(lpfc_hbq_defs);
5429}
5430
5431/**
5432 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5433 *
5434 * This function adds the number of hbq entries in every HBQ to get
5435 * the total number of hbq entries required for the HBA and returns
5436 * the total count.
5437 **/
5438static int
5439lpfc_sli_hbq_entry_count(void)
5440{
5441 int hbq_count = lpfc_sli_hbq_count();
5442 int count = 0;
5443 int i;
5444
5445 for (i = 0; i < hbq_count; ++i)
5446 count += lpfc_hbq_defs[i]->entry_count;
5447 return count;
5448}
5449
5450/**
5451 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5452 *
5453 * This function calculates amount of memory required for all hbq entries
5454 * to be configured and returns the total memory required.
5455 **/
5456int
5457lpfc_sli_hbq_size(void)
5458{
5459 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5460}
5461
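/*
 * A small worked example of the sizing math above (the entry counts
 * are hypothetical; the real values come from lpfc_hbq_defs[]): with
 * two HBQs of 256 and 128 entries,
 *
 *	lpfc_sli_hbq_entry_count() = 256 + 128 = 384
 *	lpfc_sli_hbq_size()        = 384 * sizeof(struct lpfc_hbq_entry)
 */
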
5462/**
5463 * lpfc_sli_hbq_setup - configure and initialize HBQs
5464 * @phba: Pointer to HBA context object.
5465 *
5466 * This function is called during the SLI initialization to configure
5467 * all the HBQs and post buffers to the HBQ. The caller is not
5468 * required to hold any locks. This function will return zero if successful
5469 * else it will return negative error code.
5470 **/
5471static int
5472lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5473{
5474 int hbq_count = lpfc_sli_hbq_count();
5475 LPFC_MBOXQ_t *pmb;
5476 MAILBOX_t *pmbox;
5477 uint32_t hbqno;
5478 uint32_t hbq_entry_index;
5479
5480 /* Get a Mailbox buffer to setup mailbox
5481 * commands for HBA initialization
5482 */
5483 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5484
5485 if (!pmb)
5486 return -ENOMEM;
5487
5488 pmbox = &pmb->u.mb;
5489
5490 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5491 phba->link_state = LPFC_INIT_MBX_CMDS;
5492 phba->hbq_in_use = 1;
5493
5494 hbq_entry_index = 0;
5495 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5496 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5497 phba->hbqs[hbqno].hbqPutIdx = 0;
5498 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5499 phba->hbqs[hbqno].entry_count =
5500 lpfc_hbq_defs[hbqno]->entry_count;
5501 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5502 hbq_entry_index, pmb);
5503 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5504
5505 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5506 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5507 mbxStatus <status>, ring <num> */
5508
5509 lpfc_printf_log(phba, KERN_ERR,
5510 LOG_SLI | LOG_VPORT,
5511 "1805 Adapter failed to init. "
5512 "Data: x%x x%x x%x\n",
5513 pmbox->mbxCommand,
5514 pmbox->mbxStatus, hbqno);
5515
5516 phba->link_state = LPFC_HBA_ERROR;
5517 mempool_free(pmb, phba->mbox_mem_pool);
5518 return -ENXIO;
5519 }
5520 }
5521 phba->hbq_count = hbq_count;
5522
5523 mempool_free(pmb, phba->mbox_mem_pool);
5524
5525 /* Initially populate or replenish the HBQs */
5526 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5527 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5528 return 0;
5529}
5530
5531/**
5532 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5533 * @phba: Pointer to HBA context object.
5534 *
5535 * This function is called during SLI4 initialization to configure the
5536 * ELS receive-buffer queue and post buffers to it. The caller is not
5537 * required to hold any locks. Unlike its SLI3 counterpart above, this
5538 * function always returns zero.
5539 **/
5540static int
5541lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5542{
5543 phba->hbq_in_use = 1;
5544 /*
5545 * Special case when MDS diagnostics is enabled and supported:
5546 * the receive buffer count is halved to manage the incoming
5547 * traffic.
5548 */
5549 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5550 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5551 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5552 else
5553 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5554 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5555 phba->hbq_count = 1;
5556 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5557 /* Initially populate or replenish the HBQs */
5558 return 0;
5559}
5560
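/*
 * For example, if lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count were 256
 * (a hypothetical value), enabling MDS diagnostics would post only
 * 128 receive buffers (entry_count >> 1).
 */
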
5561/**
5562 * lpfc_sli_config_port - Issue config port mailbox command
5563 * @phba: Pointer to HBA context object.
5564 * @sli_mode: sli mode - 2/3
5565 *
5566 * This function is called by the sli initialization code path
5567 * to issue config_port mailbox command. This function restarts the
5568 * HBA firmware and issues a config_port mailbox command to configure
5569 * the SLI interface in the sli mode specified by sli_mode
5570 * variable. The caller is not required to hold any locks.
5571 * The function returns 0 if successful, else returns negative error
5572 * code.
5573 **/
5574int
5575lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5576{
5577 LPFC_MBOXQ_t *pmb;
5578 uint32_t resetcount = 0, rc = 0, done = 0;
5579
5580 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5581 if (!pmb) {
5582 phba->link_state = LPFC_HBA_ERROR;
5583 return -ENOMEM;
5584 }
5585
5586 phba->sli_rev = sli_mode;
5587 while (resetcount < 2 && !done) {
5588 spin_lock_irq(&phba->hbalock);
5589 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5590 spin_unlock_irq(&phba->hbalock);
5591 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5592 lpfc_sli_brdrestart(phba);
5593 rc = lpfc_sli_chipset_init(phba);
5594 if (rc)
5595 break;
5596
5597 spin_lock_irq(&phba->hbalock);
5598 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5599 spin_unlock_irq(&phba->hbalock);
5600 resetcount++;
5601
5602 /* Call pre CONFIG_PORT mailbox command initialization. A
5603 * value of 0 means the call was successful. Any other
5604 * nonzero value is a failure, but if ERESTART is returned,
5605 * the driver may reset the HBA and try again.
5606 */
5607 rc = lpfc_config_port_prep(phba);
5608 if (rc == -ERESTART) {
5609 phba->link_state = LPFC_LINK_UNKNOWN;
5610 continue;
5611 } else if (rc)
5612 break;
5613
5614 phba->link_state = LPFC_INIT_MBX_CMDS;
5615 lpfc_config_port(phba, pmb);
5616 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5617 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5618 LPFC_SLI3_HBQ_ENABLED |
5619 LPFC_SLI3_CRP_ENABLED |
5620 LPFC_SLI3_DSS_ENABLED);
5621 if (rc != MBX_SUCCESS) {
5622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5623 "0442 Adapter failed to init, mbxCmd x%x "
5624 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5625 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5626 spin_lock_irq(&phba->hbalock);
5627 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5628 spin_unlock_irq(&phba->hbalock);
5629 rc = -ENXIO;
5630 } else {
5631 /* Allow asynchronous mailbox command to go through */
5632 spin_lock_irq(&phba->hbalock);
5633 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5634 spin_unlock_irq(&phba->hbalock);
5635 done = 1;
5636
5637 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5638 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5639 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5640 "3110 Port did not grant ASABT\n");
5641 }
5642 }
5643 if (!done) {
5644 rc = -EINVAL;
5645 goto do_prep_failed;
5646 }
5647 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5648 if (!pmb->u.mb.un.varCfgPort.cMA) {
5649 rc = -ENXIO;
5650 goto do_prep_failed;
5651 }
5652 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5653 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5654 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5655 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5656 phba->max_vpi : phba->max_vports;
5657
5658 } else
5659 phba->max_vpi = 0;
5660 if (pmb->u.mb.un.varCfgPort.gerbm)
5661 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5662 if (pmb->u.mb.un.varCfgPort.gcrp)
5663 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5664
5665 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5666 phba->port_gp = phba->mbox->us.s3_pgp.port;
5667
5668 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5669 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5670 phba->cfg_enable_bg = 0;
5671 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5673 "0443 Adapter did not grant "
5674 "BlockGuard\n");
5675 }
5676 }
5677 } else {
5678 phba->hbq_get = NULL;
5679 phba->port_gp = phba->mbox->us.s2.port;
5680 phba->max_vpi = 0;
5681 }
5682do_prep_failed:
5683 mempool_free(pmb, phba->mbox_mem_pool);
5684 return rc;
5685}
5686
5687
5688/**
5689 * lpfc_sli_hba_setup - SLI initialization function
5690 * @phba: Pointer to HBA context object.
5691 *
5692 * This function is the main SLI initialization function. This function
5693 * is called by the HBA initialization code, HBA reset code and HBA
5694 * error attention handler code. Caller is not required to hold any
5695 * locks. This function issues config_port mailbox command to configure
5696 * the SLI, setup iocb rings and HBQ rings. In the end the function
5697 * calls the config_port_post function to issue init_link mailbox
5698 * command and to start the discovery. The function will return zero
5699 * if successful, else it will return negative error code.
5700 **/
5701int
5702lpfc_sli_hba_setup(struct lpfc_hba *phba)
5703{
5704 uint32_t rc;
5705 int i;
5706 int longs;
5707
5708 /* Enable ISR already does config_port because of config_msi mbx */
5709 if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
5710 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5711 if (rc)
5712 return -EIO;
5713 clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5714 }
5715 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5716
5717 if (phba->sli_rev == 3) {
5718 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5719 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5720 } else {
5721 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5722 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5723 phba->sli3_options = 0;
5724 }
5725
5726 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5727 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5728 phba->sli_rev, phba->max_vpi);
5729 rc = lpfc_sli_ring_map(phba);
5730
5731 if (rc)
5732 goto lpfc_sli_hba_setup_error;
5733
5734 /* Initialize VPIs. */
5735 if (phba->sli_rev == LPFC_SLI_REV3) {
5736 /*
5737 * The VPI bitmask and physical ID array are allocated
5738 * and initialized once only - at driver load. A port
5739 * reset doesn't need to reinitialize this memory.
5740 */
5741 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5742 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5743 phba->vpi_bmask = kcalloc(longs,
5744 sizeof(unsigned long),
5745 GFP_KERNEL);
5746 if (!phba->vpi_bmask) {
5747 rc = -ENOMEM;
5748 goto lpfc_sli_hba_setup_error;
5749 }
5750
5751 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5752 sizeof(uint16_t),
5753 GFP_KERNEL);
5754 if (!phba->vpi_ids) {
5755 kfree(phba->vpi_bmask);
5756 rc = -ENOMEM;
5757 goto lpfc_sli_hba_setup_error;
5758 }
5759 for (i = 0; i < phba->max_vpi; i++)
5760 phba->vpi_ids[i] = i;
5761 }
5762 }
5763
5764 /* Init HBQs */
5765 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5766 rc = lpfc_sli_hbq_setup(phba);
5767 if (rc)
5768 goto lpfc_sli_hba_setup_error;
5769 }
5770 spin_lock_irq(&phba->hbalock);
5771 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5772 spin_unlock_irq(&phba->hbalock);
5773
5774 rc = lpfc_config_port_post(phba);
5775 if (rc)
5776 goto lpfc_sli_hba_setup_error;
5777
5778 return rc;
5779
5780lpfc_sli_hba_setup_error:
5781 phba->link_state = LPFC_HBA_ERROR;
5782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5783 "0445 Firmware initialization failed\n");
5784 return rc;
5785}
5786
5787/**
5788 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5789 * @phba: Pointer to HBA context object.
5790 *
5791 * This function issues a dump mailbox command to read config region
5792 * 23, parses the records in the region, and populates the driver
5793 * data structures.
5794 **/
5795static int
5796lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5797{
5798 LPFC_MBOXQ_t *mboxq;
5799 struct lpfc_dmabuf *mp;
5800 struct lpfc_mqe *mqe;
5801 uint32_t data_length;
5802 int rc;
5803
5804 /* Program the default value of vlan_id and fc_map */
5805 phba->valid_vlan = 0;
5806 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5807 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5808 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5809
5810 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5811 if (!mboxq)
5812 return -ENOMEM;
5813
5814 mqe = &mboxq->u.mqe;
5815 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5816 rc = -ENOMEM;
5817 goto out_free_mboxq;
5818 }
5819
5820 mp = mboxq->ctx_buf;
5821 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5822
5823 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5824 "(%d):2571 Mailbox cmd x%x Status x%x "
5825 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5826 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5827 "CQ: x%x x%x x%x x%x\n",
5828 mboxq->vport ? mboxq->vport->vpi : 0,
5829 bf_get(lpfc_mqe_command, mqe),
5830 bf_get(lpfc_mqe_status, mqe),
5831 mqe->un.mb_words[0], mqe->un.mb_words[1],
5832 mqe->un.mb_words[2], mqe->un.mb_words[3],
5833 mqe->un.mb_words[4], mqe->un.mb_words[5],
5834 mqe->un.mb_words[6], mqe->un.mb_words[7],
5835 mqe->un.mb_words[8], mqe->un.mb_words[9],
5836 mqe->un.mb_words[10], mqe->un.mb_words[11],
5837 mqe->un.mb_words[12], mqe->un.mb_words[13],
5838 mqe->un.mb_words[14], mqe->un.mb_words[15],
5839 mqe->un.mb_words[16], mqe->un.mb_words[50],
5840 mboxq->mcqe.word0,
5841 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5842 mboxq->mcqe.trailer);
5843
5844 if (rc) {
5845 rc = -EIO;
5846 goto out_free_mboxq;
5847 }
5848 data_length = mqe->un.mb_words[5];
5849 if (data_length > DMP_RGN23_SIZE) {
5850 rc = -EIO;
5851 goto out_free_mboxq;
5852 }
5853
5854 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5855 rc = 0;
5856
5857out_free_mboxq:
5858 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5859 return rc;
5860}
5861
5862/**
5863 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5864 * @phba: pointer to lpfc hba data structure.
5865 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5866 * @vpd: pointer to the memory to hold resulting port vpd data.
5867 * @vpd_size: On input, the number of bytes allocated to @vpd.
5868 * On output, the number of data bytes in @vpd.
5869 *
5870 * This routine executes a READ_REV SLI4 mailbox command. In
5871 * addition, this routine gets the port vpd data.
5872 *
5873 * Return codes
5874 * 0 - successful
5875 * -ENOMEM - could not allocate memory.
5876 **/
5877static int
5878lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5879 uint8_t *vpd, uint32_t *vpd_size)
5880{
5881 int rc = 0;
5882 uint32_t dma_size;
5883 struct lpfc_dmabuf *dmabuf;
5884 struct lpfc_mqe *mqe;
5885
5886 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5887 if (!dmabuf)
5888 return -ENOMEM;
5889
5890 /*
5891 * Get a DMA buffer for the vpd data resulting from the READ_REV
5892 * mailbox command.
5893 */
5894 dma_size = *vpd_size;
5895 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5896 &dmabuf->phys, GFP_KERNEL);
5897 if (!dmabuf->virt) {
5898 kfree(dmabuf);
5899 return -ENOMEM;
5900 }
5901
5902 /*
5903 * The SLI4 implementation of READ_REV conflicts at word1,
5904 * bits 31:16 and SLI4 adds vpd functionality not present
5905 * in SLI3. This code corrects the conflicts.
5906 */
5907 lpfc_read_rev(phba, mboxq);
5908 mqe = &mboxq->u.mqe;
5909 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5910 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5911 mqe->un.read_rev.word1 &= 0x0000FFFF;
5912 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5913 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5914
5915 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5916 if (rc) {
5917 dma_free_coherent(&phba->pcidev->dev, dma_size,
5918 dmabuf->virt, dmabuf->phys);
5919 kfree(dmabuf);
5920 return -EIO;
5921 }
5922
5923 /*
5924 * The available vpd length cannot be bigger than the
5925 * DMA buffer passed to the port. Catch the less than
5926 * case and update the caller's size.
5927 */
5928 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5929 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5930
5931 memcpy(vpd, dmabuf->virt, *vpd_size);
5932
5933 dma_free_coherent(&phba->pcidev->dev, dma_size,
5934 dmabuf->virt, dmabuf->phys);
5935 kfree(dmabuf);
5936 return 0;
5937}
5938
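/*
 * A minimal sketch (illustrative only; buf stands in for a hypothetical
 * struct lpfc_dmabuf *) of the coherent-DMA lifecycle used above to
 * stage the READ_REV vpd payload:
 *
 *	buf->virt = dma_alloc_coherent(&phba->pcidev->dev, size,
 *				       &buf->phys, GFP_KERNEL);
 *	if (!buf->virt)
 *		return -ENOMEM;
 *	... point the mailbox at buf->phys, issue it, memcpy the
 *	    result out of buf->virt ...
 *	dma_free_coherent(&phba->pcidev->dev, size, buf->virt, buf->phys);
 */
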
5939/**
5940 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5941 * @phba: pointer to lpfc hba data structure.
5942 *
5943 * This routine retrieves the controller attributes of the SLI4 device
5944 * this PCI function is attached to.
5945 *
5946 * Return codes
5947 * 0 - successful
5948 * otherwise - failed to retrieve controller attributes
5949 **/
5950static int
5951lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5952{
5953 LPFC_MBOXQ_t *mboxq;
5954 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5955 struct lpfc_controller_attribute *cntl_attr;
5956 void *virtaddr = NULL;
5957 uint32_t alloclen, reqlen;
5958 uint32_t shdr_status, shdr_add_status;
5959 union lpfc_sli4_cfg_shdr *shdr;
5960 int rc;
5961
5962 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5963 if (!mboxq)
5964 return -ENOMEM;
5965
5966 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5967 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5968 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5969 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5970 LPFC_SLI4_MBX_NEMBED);
5971
5972 if (alloclen < reqlen) {
5973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5974 "3084 Allocated DMA memory size (%d) is "
5975 "less than the requested DMA memory size "
5976 "(%d)\n", alloclen, reqlen);
5977 rc = -ENOMEM;
5978 goto out_free_mboxq;
5979 }
5980 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5981 virtaddr = mboxq->sge_array->addr[0];
5982 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5983 shdr = &mbx_cntl_attr->cfg_shdr;
5984 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5985 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5986 if (shdr_status || shdr_add_status || rc) {
5987 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5988 "3085 Mailbox x%x (x%x/x%x) failed, "
5989 "rc:x%x, status:x%x, add_status:x%x\n",
5990 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5991 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5992 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5993 rc, shdr_status, shdr_add_status);
5994 rc = -ENXIO;
5995 goto out_free_mboxq;
5996 }
5997
5998 cntl_attr = &mbx_cntl_attr->cntl_attr;
5999 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6000 phba->sli4_hba.lnk_info.lnk_tp =
6001 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6002 phba->sli4_hba.lnk_info.lnk_no =
6003 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6004 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6005 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6006
6007 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6008 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6009 sizeof(phba->BIOSVersion));
6010
6011 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6012 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6013 "flash_id: x%02x, asic_rev: x%02x\n",
6014 phba->sli4_hba.lnk_info.lnk_tp,
6015 phba->sli4_hba.lnk_info.lnk_no,
6016 phba->BIOSVersion, phba->sli4_hba.flash_id,
6017 phba->sli4_hba.asic_rev);
6018out_free_mboxq:
6019 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6020 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6021 else
6022 mempool_free(mboxq, phba->mbox_mem_pool);
6023 return rc;
6024}
6025
6026/**
6027 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6028 * @phba: pointer to lpfc hba data structure.
6029 *
6030 * This routine retrieves the physical port name of the SLI4 device this
6031 * PCI function is attached to.
6032 *
6033 * Return codes
6034 * 0 - successful
6035 * otherwise - failed to retrieve physical port name
6036 **/
6037static int
6038lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6039{
6040 LPFC_MBOXQ_t *mboxq;
6041 struct lpfc_mbx_get_port_name *get_port_name;
6042 uint32_t shdr_status, shdr_add_status;
6043 union lpfc_sli4_cfg_shdr *shdr;
6044 char cport_name = 0;
6045 int rc;
6046
6047 /* We assume nothing at this point */
6048 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6049 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6050
6051 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6052 if (!mboxq)
6053 return -ENOMEM;
6054 /* obtain link type and link number via READ_CONFIG */
6055 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6056 lpfc_sli4_read_config(phba);
6057
6058 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6059 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6060
6061 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6062 goto retrieve_ppname;
6063
6064 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6065 rc = lpfc_sli4_get_ctl_attr(phba);
6066 if (rc)
6067 goto out_free_mboxq;
6068
6069retrieve_ppname:
6070 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6071 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6072 sizeof(struct lpfc_mbx_get_port_name) -
6073 sizeof(struct lpfc_sli4_cfg_mhdr),
6074 LPFC_SLI4_MBX_EMBED);
6075 get_port_name = &mboxq->u.mqe.un.get_port_name;
6076 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6077 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6078 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6079 phba->sli4_hba.lnk_info.lnk_tp);
6080 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6081 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6082 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6083 if (shdr_status || shdr_add_status || rc) {
6084 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6085 "3087 Mailbox x%x (x%x/x%x) failed: "
6086 "rc:x%x, status:x%x, add_status:x%x\n",
6087 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6088 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6089 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6090 rc, shdr_status, shdr_add_status);
6091 rc = -ENXIO;
6092 goto out_free_mboxq;
6093 }
6094 switch (phba->sli4_hba.lnk_info.lnk_no) {
6095 case LPFC_LINK_NUMBER_0:
6096 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6097 &get_port_name->u.response);
6098 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6099 break;
6100 case LPFC_LINK_NUMBER_1:
6101 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6102 &get_port_name->u.response);
6103 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6104 break;
6105 case LPFC_LINK_NUMBER_2:
6106 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6107 &get_port_name->u.response);
6108 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6109 break;
6110 case LPFC_LINK_NUMBER_3:
6111 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6112 &get_port_name->u.response);
6113 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6114 break;
6115 default:
6116 break;
6117 }
6118
6119 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6120 phba->Port[0] = cport_name;
6121 phba->Port[1] = '\0';
6122 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6123 "3091 SLI get port name: %s\n", phba->Port);
6124 }
6125
6126out_free_mboxq:
6127 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6128 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6129 else
6130 mempool_free(mboxq, phba->mbox_mem_pool);
6131 return rc;
6132}
6133
6134/**
6135 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6136 * @phba: pointer to lpfc hba data structure.
6137 *
6138 * This routine is called to explicitly arm the SLI4 device's completion and
6139 * event queues.
6140 **/
6141static void
6142lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6143{
6144 int qidx;
6145 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6146 struct lpfc_sli4_hdw_queue *qp;
6147 struct lpfc_queue *eq;
6148
6149 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6150 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6151 if (sli4_hba->nvmels_cq)
6152 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6153 LPFC_QUEUE_REARM);
6154
6155 if (sli4_hba->hdwq) {
6156 /* Loop thru all Hardware Queues */
6157 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6158 qp = &sli4_hba->hdwq[qidx];
6159 /* ARM the corresponding CQ */
6160 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6161 LPFC_QUEUE_REARM);
6162 }
6163
6164 /* Loop thru all IRQ vectors */
6165 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6166 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6167 /* ARM the corresponding EQ */
6168 sli4_hba->sli4_write_eq_db(phba, eq,
6169 0, LPFC_QUEUE_REARM);
6170 }
6171 }
6172
6173 if (phba->nvmet_support) {
6174 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6175 sli4_hba->sli4_write_cq_db(phba,
6176 sli4_hba->nvmet_cqset[qidx], 0,
6177 LPFC_QUEUE_REARM);
6178 }
6179 }
6180}
6181
6182/**
6183 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6184 * @phba: Pointer to HBA context object.
6185 * @type: The resource extent type.
6186 * @extnt_count: buffer to hold port available extent count.
6187 * @extnt_size: buffer to hold element count per extent.
6188 *
6189 * This function calls the port and retrieves the number of available
6190 * extents and their size for a particular extent type.
6191 *
6192 * Returns: 0 if successful. Nonzero otherwise.
6193 **/
6194int
6195lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6196 uint16_t *extnt_count, uint16_t *extnt_size)
6197{
6198 int rc = 0;
6199 uint32_t length;
6200 uint32_t mbox_tmo;
6201 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6202 LPFC_MBOXQ_t *mbox;
6203
6204 *extnt_count = 0;
6205 *extnt_size = 0;
6206
6207 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6208 if (!mbox)
6209 return -ENOMEM;
6210
6211 /* Find out how many extents are available for this resource type */
6212 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6213 sizeof(struct lpfc_sli4_cfg_mhdr));
6214 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6215 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6216 length, LPFC_SLI4_MBX_EMBED);
6217
6218 /* Send an extents count of 0 - the GET doesn't use it. */
6219 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6220 LPFC_SLI4_MBX_EMBED);
6221 if (unlikely(rc)) {
6222 rc = -EIO;
6223 goto err_exit;
6224 }
6225
6226 if (!phba->sli4_hba.intr_enable)
6227 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6228 else {
6229 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6230 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6231 }
6232 if (unlikely(rc)) {
6233 rc = -EIO;
6234 goto err_exit;
6235 }
6236
6237 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6238 if (bf_get(lpfc_mbox_hdr_status,
6239 &rsrc_info->header.cfg_shdr.response)) {
6240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6241 "2930 Failed to get resource extents "
6242 "Status 0x%x Add'l Status 0x%x\n",
6243 bf_get(lpfc_mbox_hdr_status,
6244 &rsrc_info->header.cfg_shdr.response),
6245 bf_get(lpfc_mbox_hdr_add_status,
6246 &rsrc_info->header.cfg_shdr.response));
6247 rc = -EIO;
6248 goto err_exit;
6249 }
6250
6251 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6252 &rsrc_info->u.rsp);
6253 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6254 &rsrc_info->u.rsp);
6255
6256 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6257 "3162 Retrieved extents type-%d from port: count:%d, "
6258 "size:%d\n", type, *extnt_count, *extnt_size);
6259
6260err_exit:
6261 mempool_free(mbox, phba->mbox_mem_pool);
6262 return rc;
6263}
6264
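/*
 * A minimal, hypothetical usage sketch for the query above:
 *
 *	uint16_t cnt, size;
 *
 *	if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					   &cnt, &size))
 *		return -EIO;
 *	... the port offers cnt extents of size elements each ...
 */
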
6265/**
6266 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6267 * @phba: Pointer to HBA context object.
6268 * @type: The extent type to check.
6269 *
6270 * This function reads the current available extents from the port and checks
6271 * if the extent count or extent size has changed since the last access.
6272 * Callers use this routine after a port reset to determine whether there
6273 * is an extent reprovisioning requirement.
6274 *
6275 * Returns:
6276 * -Error: error indicates problem.
6277 * 1: Extent count or size has changed.
6278 * 0: No changes.
6279 **/
6280static int
6281lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6282{
6283 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6284 uint16_t size_diff, rsrc_ext_size;
6285 int rc = 0;
6286 struct lpfc_rsrc_blks *rsrc_entry;
6287 struct list_head *rsrc_blk_list = NULL;
6288
6289 size_diff = 0;
6290 curr_ext_cnt = 0;
6291 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6292 &rsrc_ext_cnt,
6293 &rsrc_ext_size);
6294 if (unlikely(rc))
6295 return -EIO;
6296
6297 switch (type) {
6298 case LPFC_RSC_TYPE_FCOE_RPI:
6299 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6300 break;
6301 case LPFC_RSC_TYPE_FCOE_VPI:
6302 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6303 break;
6304 case LPFC_RSC_TYPE_FCOE_XRI:
6305 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6306 break;
6307 case LPFC_RSC_TYPE_FCOE_VFI:
6308 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6309 break;
6310 default:
6311 break;
6312 }
6313
6314 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6315 curr_ext_cnt++;
6316 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6317 size_diff++;
6318 }
6319
6320 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6321 rc = 1;
6322
6323 return rc;
6324}
6325
6326/**
6327 * lpfc_sli4_cfg_post_extnts - Post a resource extents allocation request
6328 * @phba: Pointer to HBA context object.
6329 * @extnt_cnt: number of available extents.
6330 * @type: the extent type (rpi, xri, vfi, vpi).
6331 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6332 * @mbox: pointer to the caller's allocated mailbox structure.
6333 *
6334 * This function executes the extents allocation request. It also
6335 * takes care of the amount of memory needed to allocate or get the
6336 * allocated extents. It is the caller's responsibility to evaluate
6337 * the response.
6338 *
6339 * Returns:
6340 * -Error: Error value describes the condition found.
6341 * 0: if successful
6342 **/
6343static int
6344lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6345 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6346{
6347 int rc = 0;
6348 uint32_t req_len;
6349 uint32_t emb_len;
6350 uint32_t alloc_len, mbox_tmo;
6351
6352 /* Calculate the total requested length of the dma memory */
6353 req_len = extnt_cnt * sizeof(uint16_t);
6354
6355 /*
6356 * Calculate the size of an embedded mailbox. The uint32_t
6357 * accounts for the extent-specific word.
6358 */
6359 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6360 sizeof(uint32_t);
6361
6362 /*
6363 * Presume the allocation and response will fit into an embedded
6364 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6365 */
6366 *emb = LPFC_SLI4_MBX_EMBED;
6367 if (req_len > emb_len) {
6368 req_len = extnt_cnt * sizeof(uint16_t) +
6369 sizeof(union lpfc_sli4_cfg_shdr) +
6370 sizeof(uint32_t);
6371 *emb = LPFC_SLI4_MBX_NEMBED;
6372 }
6373
6374 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6375 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6376 req_len, *emb);
6377 if (alloc_len < req_len) {
6378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6379 "2982 Allocated DMA memory size (x%x) is "
6380 "less than the requested DMA memory "
6381 "size (x%x)\n", alloc_len, req_len);
6382 return -ENOMEM;
6383 }
6384 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6385 if (unlikely(rc))
6386 return -EIO;
6387
6388 if (!phba->sli4_hba.intr_enable)
6389 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6390 else {
6391 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6392 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6393 }
6394
6395 if (unlikely(rc))
6396 rc = -EIO;
6397 return rc;
6398}
6399
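/*
 * Worked example of the embed decision above (the extent count is
 * hypothetical): with extnt_cnt = 120, the response needs
 * req_len = 120 * sizeof(uint16_t) = 240 bytes. If that exceeds
 * emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
 * sizeof(uint32_t), the request is rebuilt as non-embedded and
 * req_len grows by sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t)
 * for the out-of-line header.
 */
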
6400/**
6401 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6402 * @phba: Pointer to HBA context object.
6403 * @type: The resource extent type to allocate.
6404 *
6405 * This function allocates the number of elements for the specified
6406 * resource type.
6407 **/
6408static int
6409lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6410{
6411 bool emb = false;
6412 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6413 uint16_t rsrc_id, rsrc_start, j, k;
6414 uint16_t *ids;
6415 int i, rc;
6416 unsigned long longs;
6417 unsigned long *bmask;
6418 struct lpfc_rsrc_blks *rsrc_blks;
6419 LPFC_MBOXQ_t *mbox;
6420 uint32_t length;
6421 struct lpfc_id_range *id_array = NULL;
6422 void *virtaddr = NULL;
6423 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6424 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6425 struct list_head *ext_blk_list;
6426
6427 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6428 &rsrc_cnt,
6429 &rsrc_size);
6430 if (unlikely(rc))
6431 return -EIO;
6432
6433 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6434 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6435 "3009 No available Resource Extents "
6436 "for resource type 0x%x: Count: 0x%x, "
6437 "Size 0x%x\n", type, rsrc_cnt,
6438 rsrc_size);
6439 return -ENOMEM;
6440 }
6441
6442 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6443 "2903 Post resource extents type-0x%x: "
6444 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6445
6446 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6447 if (!mbox)
6448 return -ENOMEM;
6449
6450 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6451 if (unlikely(rc)) {
6452 rc = -EIO;
6453 goto err_exit;
6454 }
6455
6456 /*
6457 * Figure out where the response is located. Then get local pointers
6458 * to the response data. The port does not guarantee that it will honor
6459 * the full requested extent count, so update the local variable with
6460 * the allocated count from the port.
6461 */
6462 if (emb == LPFC_SLI4_MBX_EMBED) {
6463 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6464 id_array = &rsrc_ext->u.rsp.id[0];
6465 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6466 } else {
6467 virtaddr = mbox->sge_array->addr[0];
6468 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6469 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6470 id_array = &n_rsrc->id;
6471 }
6472
6473 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6474 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6475
6476 /*
6477 * Based on the resource size and count, correct the base and max
6478 * resource values.
6479 */
6480 length = sizeof(struct lpfc_rsrc_blks);
6481 switch (type) {
6482 case LPFC_RSC_TYPE_FCOE_RPI:
6483 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6484 sizeof(unsigned long),
6485 GFP_KERNEL);
6486 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6487 rc = -ENOMEM;
6488 goto err_exit;
6489 }
6490 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6491 sizeof(uint16_t),
6492 GFP_KERNEL);
6493 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6494 kfree(phba->sli4_hba.rpi_bmask);
6495 rc = -ENOMEM;
6496 goto err_exit;
6497 }
6498
6499 /*
6500 * The next_rpi was initialized with the maximum available
6501 * count but the port may allocate a smaller number. Catch
6502 * that case and update the next_rpi.
6503 */
6504 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6505
6506 /* Initialize local ptrs for common extent processing later. */
6507 bmask = phba->sli4_hba.rpi_bmask;
6508 ids = phba->sli4_hba.rpi_ids;
6509 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6510 break;
6511 case LPFC_RSC_TYPE_FCOE_VPI:
6512 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6513 GFP_KERNEL);
6514 if (unlikely(!phba->vpi_bmask)) {
6515 rc = -ENOMEM;
6516 goto err_exit;
6517 }
6518 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6519 GFP_KERNEL);
6520 if (unlikely(!phba->vpi_ids)) {
6521 kfree(phba->vpi_bmask);
6522 rc = -ENOMEM;
6523 goto err_exit;
6524 }
6525
6526 /* Initialize local ptrs for common extent processing later. */
6527 bmask = phba->vpi_bmask;
6528 ids = phba->vpi_ids;
6529 ext_blk_list = &phba->lpfc_vpi_blk_list;
6530 break;
6531 case LPFC_RSC_TYPE_FCOE_XRI:
6532 phba->sli4_hba.xri_bmask = kcalloc(longs,
6533 sizeof(unsigned long),
6534 GFP_KERNEL);
6535 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6536 rc = -ENOMEM;
6537 goto err_exit;
6538 }
6539 phba->sli4_hba.max_cfg_param.xri_used = 0;
6540 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6541 sizeof(uint16_t),
6542 GFP_KERNEL);
6543 if (unlikely(!phba->sli4_hba.xri_ids)) {
6544 kfree(phba->sli4_hba.xri_bmask);
6545 rc = -ENOMEM;
6546 goto err_exit;
6547 }
6548
6549 /* Initialize local ptrs for common extent processing later. */
6550 bmask = phba->sli4_hba.xri_bmask;
6551 ids = phba->sli4_hba.xri_ids;
6552 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6553 break;
6554 case LPFC_RSC_TYPE_FCOE_VFI:
6555 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6556 sizeof(unsigned long),
6557 GFP_KERNEL);
6558 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6559 rc = -ENOMEM;
6560 goto err_exit;
6561 }
6562 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6563 sizeof(uint16_t),
6564 GFP_KERNEL);
6565 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6566 kfree(phba->sli4_hba.vfi_bmask);
6567 rc = -ENOMEM;
6568 goto err_exit;
6569 }
6570
6571 /* Initialize local ptrs for common extent processing later. */
6572 bmask = phba->sli4_hba.vfi_bmask;
6573 ids = phba->sli4_hba.vfi_ids;
6574 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6575 break;
6576 default:
6577 /* Unsupported Opcode. Fail call. */
6578 id_array = NULL;
6579 bmask = NULL;
6580 ids = NULL;
6581 ext_blk_list = NULL;
6582 goto err_exit;
6583 }
6584
6585 /*
6586 * Complete initializing the extent configuration with the
6587 * allocated ids assigned to this function. The bitmask serves
6588 * as an index into the array and manages the available ids. The
6589 * array just stores the ids communicated to the port via the wqes.
6590 */
6591 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6592 if ((i % 2) == 0)
6593 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6594 &id_array[k]);
6595 else
6596 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6597 &id_array[k]);
6598
6599 rsrc_blks = kzalloc(length, GFP_KERNEL);
6600 if (unlikely(!rsrc_blks)) {
6601 rc = -ENOMEM;
6602 kfree(bmask);
6603 kfree(ids);
6604 goto err_exit;
6605 }
6606 rsrc_blks->rsrc_start = rsrc_id;
6607 rsrc_blks->rsrc_size = rsrc_size;
6608 list_add_tail(&rsrc_blks->list, ext_blk_list);
6609 rsrc_start = rsrc_id;
6610 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6611 phba->sli4_hba.io_xri_start = rsrc_start +
6612 lpfc_sli4_get_iocb_cnt(phba);
6613 }
6614
6615 while (rsrc_id < (rsrc_start + rsrc_size)) {
6616 ids[j] = rsrc_id;
6617 rsrc_id++;
6618 j++;
6619 }
6620 /* Entire word processed. Get next word.*/
6621 if ((i % 2) == 1)
6622 k++;
6623 }
6624 err_exit:
6625 lpfc_sli4_mbox_cmd_free(phba, mbox);
6626 return rc;
6627}
6628
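/*
 * Each word in id_array packs two 16-bit resource ids, which is why the
 * loop above reads word4_0 on even i and word4_1 on odd i, advancing k
 * only after both halves of a word are consumed. A compact sketch of
 * the same decode (illustrative only):
 *
 *	uint16_t id;
 *
 *	for (i = 0, k = 0; i < rsrc_cnt; i++) {
 *		if (i & 1)
 *			id = bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[k++]);
 *		else
 *			id = bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[k]);
 *	}
 */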
6629
6630
6631/**
6632 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6633 * @phba: Pointer to HBA context object.
6634 * @type: the extent's type.
6635 *
6636 * This function deallocates all extents of a particular resource type.
6637 * SLI4 does not allow for deallocating a particular extent range. It
6638 * is the caller's responsibility to release all kernel memory resources.
6639 **/
6640static int
6641lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6642{
6643 int rc;
6644 uint32_t length, mbox_tmo = 0;
6645 LPFC_MBOXQ_t *mbox;
6646 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6647 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6648
6649 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6650 if (!mbox)
6651 return -ENOMEM;
6652
6653 /*
6654 * This function sends an embedded mailbox because it only sends
6655 * the resource type. All extents of this type are released by the
6656 * port.
6657 */
6658 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6659 sizeof(struct lpfc_sli4_cfg_mhdr));
6660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6661 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6662 length, LPFC_SLI4_MBX_EMBED);
6663
6664 /* Send an extents count of 0 - the dealloc doesn't use it. */
6665 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6666 LPFC_SLI4_MBX_EMBED);
6667 if (unlikely(rc)) {
6668 rc = -EIO;
6669 goto out_free_mbox;
6670 }
6671 if (!phba->sli4_hba.intr_enable)
6672 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6673 else {
6674 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6675 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6676 }
6677 if (unlikely(rc)) {
6678 rc = -EIO;
6679 goto out_free_mbox;
6680 }
6681
6682 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6683 if (bf_get(lpfc_mbox_hdr_status,
6684 &dealloc_rsrc->header.cfg_shdr.response)) {
6685 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6686 "2919 Failed to release resource extents "
6687 "for type %d - Status 0x%x Add'l Status 0x%x. "
6688 "Resource memory not released.\n",
6689 type,
6690 bf_get(lpfc_mbox_hdr_status,
6691 &dealloc_rsrc->header.cfg_shdr.response),
6692 bf_get(lpfc_mbox_hdr_add_status,
6693 &dealloc_rsrc->header.cfg_shdr.response));
6694 rc = -EIO;
6695 goto out_free_mbox;
6696 }
6697
6698 /* Release kernel memory resources for the specific type. */
6699 switch (type) {
6700 case LPFC_RSC_TYPE_FCOE_VPI:
6701 kfree(phba->vpi_bmask);
6702 kfree(phba->vpi_ids);
6703 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6704 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6705 &phba->lpfc_vpi_blk_list, list) {
6706 list_del_init(&rsrc_blk->list);
6707 kfree(rsrc_blk);
6708 }
6709 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6710 break;
6711 case LPFC_RSC_TYPE_FCOE_XRI:
6712 kfree(phba->sli4_hba.xri_bmask);
6713 kfree(phba->sli4_hba.xri_ids);
6714 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6715 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6716 list_del_init(&rsrc_blk->list);
6717 kfree(rsrc_blk);
6718 }
6719 break;
6720 case LPFC_RSC_TYPE_FCOE_VFI:
6721 kfree(phba->sli4_hba.vfi_bmask);
6722 kfree(phba->sli4_hba.vfi_ids);
6723 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6724 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6725 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6726 list_del_init(&rsrc_blk->list);
6727 kfree(rsrc_blk);
6728 }
6729 break;
6730 case LPFC_RSC_TYPE_FCOE_RPI:
6731 /* RPI bitmask and physical id array are cleaned up earlier. */
6732 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6733 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6734 list_del_init(&rsrc_blk->list);
6735 kfree(rsrc_blk);
6736 }
6737 break;
6738 default:
6739 break;
6740 }
6741
6742 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6743
6744 out_free_mbox:
6745 mempool_free(mbox, phba->mbox_mem_pool);
6746 return rc;
6747}
6748
6749static void
6750lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6751 uint32_t feature)
6752{
6753 uint32_t len;
6754 u32 sig_freq = 0;
6755
6756 len = sizeof(struct lpfc_mbx_set_feature) -
6757 sizeof(struct lpfc_sli4_cfg_mhdr);
6758 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6759 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6760 LPFC_SLI4_MBX_EMBED);
6761
6762 switch (feature) {
6763 case LPFC_SET_UE_RECOVERY:
6764 bf_set(lpfc_mbx_set_feature_UER,
6765 &mbox->u.mqe.un.set_feature, 1);
6766 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6767 mbox->u.mqe.un.set_feature.param_len = 8;
6768 break;
6769 case LPFC_SET_MDS_DIAGS:
6770 bf_set(lpfc_mbx_set_feature_mds,
6771 &mbox->u.mqe.un.set_feature, 1);
6772 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6773 &mbox->u.mqe.un.set_feature, 1);
6774 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6775 mbox->u.mqe.un.set_feature.param_len = 8;
6776 break;
6777 case LPFC_SET_CGN_SIGNAL:
6778 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6779 sig_freq = 0;
6780 else
6781 sig_freq = phba->cgn_sig_freq;
6782
6783 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6784 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6785 &mbox->u.mqe.un.set_feature, sig_freq);
6786 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6787 &mbox->u.mqe.un.set_feature, sig_freq);
6788 }
6789
6790 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6791 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6792 &mbox->u.mqe.un.set_feature, sig_freq);
6793
6794 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6795 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6796 sig_freq = 0;
6797 else
6798 sig_freq = lpfc_acqe_cgn_frequency;
6799
6800 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6801 &mbox->u.mqe.un.set_feature, sig_freq);
6802
6803 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6804 mbox->u.mqe.un.set_feature.param_len = 12;
6805 break;
6806 case LPFC_SET_DUAL_DUMP:
6807 bf_set(lpfc_mbx_set_feature_dd,
6808 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6809 bf_set(lpfc_mbx_set_feature_ddquery,
6810 &mbox->u.mqe.un.set_feature, 0);
6811 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6812 mbox->u.mqe.un.set_feature.param_len = 4;
6813 break;
6814 case LPFC_SET_ENABLE_MI:
6815 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6816 mbox->u.mqe.un.set_feature.param_len = 4;
6817 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6818 phba->pport->cfg_lun_queue_depth);
6819 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6820 phba->sli4_hba.pc_sli4_params.mi_ver);
6821 break;
6822 case LPFC_SET_LD_SIGNAL:
6823 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6824 mbox->u.mqe.un.set_feature.param_len = 16;
6825 bf_set(lpfc_mbx_set_feature_lds_qry,
6826 &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6827 break;
6828 case LPFC_SET_ENABLE_CMF:
6829 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6830 mbox->u.mqe.un.set_feature.param_len = 4;
6831 bf_set(lpfc_mbx_set_feature_cmf,
6832 &mbox->u.mqe.un.set_feature, 1);
6833 break;
6834 }
6835 return;
6836}
6837
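/*
 * A minimal, hypothetical usage sketch for the helper above:
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_set_features(phba, mbox, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	... check rc and the cfg_shdr status, then free the mailbox ...
 */
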
6838/**
6839 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6840 * @phba: Pointer to HBA context object.
6841 *
6842 * Disable FW logging into host memory on the adapter. To
6843 * be done before reading logs from the host memory.
6844 **/
6845void
6846lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6847{
6848 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6849
6850 spin_lock_irq(&phba->ras_fwlog_lock);
6851 ras_fwlog->state = INACTIVE;
6852 spin_unlock_irq(&phba->ras_fwlog_lock);
6853
6854 /* Disable FW logging to host memory */
6855 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6856 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6857
6858 /* Wait 10ms for firmware to stop using DMA buffer */
6859 usleep_range(10 * 1000, 20 * 1000);
6860}
6861
6862/**
6863 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6864 * @phba: Pointer to HBA context object.
6865 *
6866 * This function is called to free memory allocated for RAS FW logging
6867 * support in the driver.
6868 **/
6869void
6870lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6871{
6872 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6873 struct lpfc_dmabuf *dmabuf, *next;
6874
6875 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6876 list_for_each_entry_safe(dmabuf, next,
6877 &ras_fwlog->fwlog_buff_list,
6878 list) {
6879 list_del(&dmabuf->list);
6880 dma_free_coherent(&phba->pcidev->dev,
6881 LPFC_RAS_MAX_ENTRY_SIZE,
6882 dmabuf->virt, dmabuf->phys);
6883 kfree(dmabuf);
6884 }
6885 }
6886
6887 if (ras_fwlog->lwpd.virt) {
6888 dma_free_coherent(&phba->pcidev->dev,
6889 sizeof(uint32_t) * 2,
6890 ras_fwlog->lwpd.virt,
6891 ras_fwlog->lwpd.phys);
6892 ras_fwlog->lwpd.virt = NULL;
6893 }
6894
6895 spin_lock_irq(&phba->ras_fwlog_lock);
6896 ras_fwlog->state = INACTIVE;
6897 spin_unlock_irq(&phba->ras_fwlog_lock);
6898}
6899
6900/**
6901 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6902 * @phba: Pointer to HBA context object.
6903 * @fwlog_buff_count: Count of buffers to be created.
6904 *
6905 * This routine allocates DMA memory for the Log Write Position Data
6906 * [LWPD] and for the buffers posted to the adapter for FW log updates.
6907 * Buffer count is calculated from the module param ras_fwlog_buffsize;
6908 * the size of each buffer posted to FW is 64K.
6909 **/
6910
6911static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->ras_fwlog_lock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post a mailbox command to enable FW logging in host
 * memory.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->ras_fwlog_lock);

	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
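
	/*
	 * Worked example (illustrative only, assuming the 256KB
	 * LPFC_RAS_MIN_BUFF_POST_SIZE and 64KB LPFC_RAS_MAX_ENTRY_SIZE
	 * definitions in the driver headers): cfg_ras_fwlog_buffsize = 2
	 * gives fwlog_buffsize = 512KB and fwlog_entry_count = 8 buffers
	 * posted to the adapter.
	 */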

	/*
	 * If re-enabling FW logging support, reuse the previously allocated
	 * DMA buffers while posting the MBX command.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
		sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LWPD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	spin_lock_irq(&phba->ras_fwlog_lock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->ras_fwlog_lock);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Check if RAS is supported on the adapter and initialize it.
 **/
void
lpfc_sli4_ras_setup(struct lpfc_hba *phba)
{
	/* Check whether RAS FW logging needs to be enabled */
	if (lpfc_check_fwlog_support(phba))
		return;

	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
				 LPFC_RAS_ENABLE_LOGGING);
}

/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready. An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
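
/*
 * Illustrative sketch (not driver code): the bitmask/ID-array pairs built
 * above are consumed by allocators that scan the bitmask for a free index
 * and hand out the physical ID stored at that index, e.g.:
 *
 *	u16 idx = find_first_zero_bit(bmask, count);
 *	if (idx >= count)
 *		return LPFC_RPI_ALLOC_ERROR;
 *	set_bit(idx, bmask);
 *	return ids[idx];	// physical ID = base + idx
 *
 * The real allocators (e.g. __lpfc_sli4_alloc_xri()) wrap this core idea
 * with locking and usage accounting.
 */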

/**
 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
 * @phba: Pointer to HBA context object.
 *
 * This function releases all SLI4 resource identifiers: the resource
 * extents when extents are in use, otherwise the bitmask and ID arrays
 * provisioned from READ_CONFIG.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}
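
	/*
	 * Worked example (block count is hypothetical): the embedded form
	 * can carry sizeof(MAILBOX_t) minus the config header and the one
	 * extents-specific word. A port holding 64 extent blocks needs
	 * 64 * sizeof(uint16_t) = 128 bytes of extent IDs in the response,
	 * which still fits embedded; a port with many hundreds of blocks
	 * would push req_len past emb_len and select the non-embedded
	 * (external buffer) form instead.
	 */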

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2983 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee a response for
	 * every extent count requested, so update the local variable with
	 * the allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status = 0, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3161 Failure to post sgl to port,status %x "
				"blkcnt %d totalcnt %d postcnt %d\n",
				status, block_cnt, total_cnt, post_cnt);
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}
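
/*
 * Illustrative walk-through of the block-building loop above (XRI values
 * made up): given sglq entries with xritags 10, 11, 12, 14, 15, the hole
 * between 12 and 14 closes the first block, so {10, 11, 12} goes out in
 * one non-embedded SGL block post and {14, 15} seeds the next prep list.
 * A trailing single-entry block (block_cnt == 1) falls back to the
 * embedded lpfc_sli4_post_sgl() path instead.
 */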

/**
 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers that need reposting onto a local list */
	lpfc_io_buf_flush(phba, &post_nblist);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_sli4_post_io_sgl_list(
			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

static void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
	mbox->u.mqe.un.set_host_data.param_len =
					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
	snprintf(mbox->u.mqe.un.set_host_data.un.data,
		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
		 "Linux %s v"LPFC_DRIVER_VERSION,
		 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
}
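
/*
 * The param string built above comes out as, e.g., "Linux FC v14.4.0.0"
 * on an FC port (the exact version literal is LPFC_DRIVER_VERSION; the
 * value shown here is only an example).
 */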

int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* If RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			break;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);

		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}

static void
lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	union lpfc_sli4_cfg_shdr *shdr;
	u32 shdr_status, shdr_add_status;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
				"4622 SET_FEATURE (x%x) mbox failed, "
				"status x%x add_status x%x, mbx status x%x\n",
				LPFC_SET_LD_SIGNAL, shdr_status,
				shdr_add_status, pmb->u.mb.mbxStatus);
		phba->degrade_activate_threshold = 0;
		phba->degrade_deactivate_threshold = 0;
		phba->fec_degrade_interval = 0;
		goto out;
	}

	phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
	phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
	phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;

	lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
			"4624 Success: da x%x dd x%x interval x%x\n",
			phba->degrade_activate_threshold,
			phba->degrade_deactivate_threshold,
			phba->fec_degrade_interval);
out:
	mempool_free(pmb, phba->mbox_mem_pool);
}

int
lpfc_read_lds_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	union lpfc_sli4_cfg_shdr *shdr;
	u32 shdr_status, shdr_add_status;
	u32 sig, acqe;

	/* Two outcomes: (1) set_features was successful and EDC negotiation
	 * is done. (2) The mailbox failed, so fall back to FPIN support only.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2516 CGN SET_FEATURE mbox failed with "
				"status x%x add_status x%x, mbx status x%x "
				"Reset Congestion to FPINs only\n",
				shdr_status, shdr_add_status,
				pmb->u.mb.mbxStatus);
		/* If there is a mbox error, move on to RDF */
		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
		goto out;
	}

	/* Zero out Congestion Signal ACQE counter */
	phba->cgn_acqe_cnt = 0;

	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
		      &pmb->u.mqe.un.set_feature);
	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
		     &pmb->u.mqe.un.set_feature);
	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4620 SET_FEATURES Success: Freq: %ds %dms "
			" Reg: x%x x%x\n", acqe, sig,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);
out:
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Register for FPIN events from the fabric now that the
	 * EDC common_set_features has completed.
	 */
	lpfc_issue_els_rdf(vport, 0);
}

int
lpfc_config_cgn_signal(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	u32 rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		goto out_rdf;

	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
			"Reg: x%x x%x\n",
			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
			phba->cgn_reg_signal, phba->cgn_reg_fpin);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out;
	return 0;

out:
	mempool_free(mboxq, phba->mbox_mem_pool);
out_rdf:
	/* If there is a mbox error, move on to RDF */
	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
	lpfc_issue_els_rdf(phba->pport, 0);
	return -EIO;
}
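
/*
 * Net effect of the two routines above: a successful SET_FEATURES leaves
 * congestion signaling registered per the EDC negotiation, while any
 * mailbox failure degrades to FPIN-only (WARN | ALARM) registration; in
 * both cases lpfc_issue_els_rdf() then registers with the fabric.
 */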

/**
 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the per-eq idle_stat to dynamically dictate
 * polling decisions.
 *
 * Return codes:
 *   None
 **/
static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *eq;
	struct lpfc_idle_stat *idle_stat;
	u64 wall;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		eq = hdwq->hba_eq;

		/* Skip if we've already handled this eq's primary CPU */
		if (eq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
		idle_stat->prev_wall = wall;

		if (phba->nvmet_support ||
		    phba->cmf_active_mode != LPFC_CFG_OFF ||
		    phba->intr_type != MSIX)
			eq->poll_mode = LPFC_QUEUE_WORK;
		else
			eq->poll_mode = LPFC_THREADED_IRQ;
	}

	if (!phba->nvmet_support && phba->intr_type == MSIX)
		schedule_delayed_work(&phba->idle_stat_delay_work,
				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
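
/*
 * Summary of the selection above: an EQ uses threaded-IRQ polling only on
 * a non-NVMET, non-CMF port running MSI-X; every other combination falls
 * back to the workqueue path, and the idle-stat sampling worker is only
 * scheduled in the MSI-X, non-NVMET case.
 */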

static void lpfc_sli4_dip(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
		struct lpfc_register reg_data;

		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return;

		if (bf_get(lpfc_sliport_status_dip, &reg_data))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2904 Firmware Dump Image Present"
					" on Adapter");
	}
}

/**
 * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entries: Number of rx_info_entry objects to allocate in ring
 *
 * Return:
 * 0 - Success
 * -ENOMEM - Failure to kmalloc
 **/
int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
				u32 entries)
{
	rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
					 GFP_KERNEL);
	if (!rx_monitor->ring)
		return -ENOMEM;

	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
	spin_lock_init(&rx_monitor->lock);
	rx_monitor->entries = entries;

	return 0;
}

/**
 * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 *
 * Called after cancellation of cmf_timer.
 **/
void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
{
	kfree(rx_monitor->ring);
	rx_monitor->ring = NULL;
	rx_monitor->entries = 0;
	rx_monitor->head_idx = 0;
	rx_monitor->tail_idx = 0;
}

/**
 * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @entry: Pointer to rx_info_entry
 *
 * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
 * deep copy of rx_info_entry, not a shallow copy of the rx_info_entry ptr.
 *
 * This is called from lpfc_cmf_timer, which is in timer/softirq context.
 *
 * In cases of old data overflow, we do a best effort of FIFO order.
 **/
void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
			    struct rx_info_entry *entry)
{
	struct rx_info_entry *ring = rx_monitor->ring;
	u32 *head_idx = &rx_monitor->head_idx;
	u32 *tail_idx = &rx_monitor->tail_idx;
	spinlock_t *ring_lock = &rx_monitor->lock;
	u32 ring_size = rx_monitor->entries;

	spin_lock(ring_lock);
	memcpy(&ring[*tail_idx], entry, sizeof(*entry));
	*tail_idx = (*tail_idx + 1) % ring_size;

	/* Best effort of FIFO saved data */
	if (*tail_idx == *head_idx)
		*head_idx = (*head_idx + 1) % ring_size;

	spin_unlock(ring_lock);
}
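
/*
 * Ring behavior sketch (illustrative values): with entries = 4 and three
 * readable slots (head = 1, tail = 0), recording one more entry writes
 * slot 0 and advances tail onto head, so the best-effort FIFO rule bumps
 * head as well:
 *
 *	head=1 tail=0  ->  write slot 0, tail=1  ->  tail==head, head=2
 *
 * i.e. the oldest sample is overwritten and the ring always retains the
 * most recent entries - 1 records rather than rejecting new data.
 */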

/**
 * lpfc_rx_monitor_report - Read out rx_monitor's ring
 * @phba: Pointer to lpfc_hba object
 * @rx_monitor: Pointer to lpfc_rx_info_monitor object
 * @buf: Pointer to char buffer that will contain rx monitor info data
 * @buf_len: Length of buf, including the terminating NUL
 * @max_read_entries: Maximum number of entries to read out of ring
 *
 * Used to dump/read what's in rx_monitor's ring buffer.
 *
 * If buf is NULL || buf_len == 0, then it is implied that we want to log the
 * information to kmsg instead of filling out buf.
 *
 * Return:
 * Number of entries read out of the ring
 **/
u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
			   struct lpfc_rx_info_monitor *rx_monitor, char *buf,
			   u32 buf_len, u32 max_read_entries)
{
	struct rx_info_entry *ring = rx_monitor->ring;
	struct rx_info_entry *entry;
	u32 *head_idx = &rx_monitor->head_idx;
	u32 *tail_idx = &rx_monitor->tail_idx;
	spinlock_t *ring_lock = &rx_monitor->lock;
	u32 ring_size = rx_monitor->entries;
	u32 cnt = 0;
	char tmp[DBG_LOG_STR_SZ] = {0};
	bool log_to_kmsg = (!buf || !buf_len);

	if (!log_to_kmsg) {
		/* clear the buffer to be sure */
		memset(buf, 0, buf_len);

		scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
					"%-8s%-8s%-8s%-16s\n",
					"MaxBPI", "Tot_Data_CMF",
					"Tot_Data_Cmd", "Tot_Data_Cmpl",
					"Lat(us)", "Avg_IO", "Max_IO", "Bsy",
					"IO_cnt", "Info", "BWutil(ms)");
	}

	/* Needs to be _irq because record is called from timer interrupt
	 * context
	 */
	spin_lock_irq(ring_lock);
	while (*head_idx != *tail_idx) {
		entry = &ring[*head_idx];

		/* Read out this entry's data. */
		if (!log_to_kmsg) {
			/* If !log_to_kmsg, then store to buf. */
			scnprintf(tmp, sizeof(tmp),
				  "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
				  "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
				  *head_idx, entry->max_bytes_per_interval,
				  entry->cmf_bytes, entry->total_bytes,
				  entry->rcv_bytes, entry->avg_io_latency,
				  entry->avg_io_size, entry->max_read_cnt,
				  entry->cmf_busy, entry->io_cnt,
				  entry->cmf_info, entry->timer_utilization,
				  entry->timer_interval);

			/* Check for buffer overflow */
			if ((strlen(buf) + strlen(tmp)) >= buf_len)
				break;

			/* Append entry's data to buffer */
			strlcat(buf, tmp, buf_len);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"4410 %02u: MBPI %llu Xmit %llu "
					"Cmpl %llu Lat %llu ASz %llu Info %02u "
					"BWUtil %u Int %u slot %u\n",
					cnt, entry->max_bytes_per_interval,
					entry->total_bytes, entry->rcv_bytes,
					entry->avg_io_latency,
					entry->avg_io_size, entry->cmf_info,
					entry->timer_utilization,
					entry->timer_interval, *head_idx);
		}

		*head_idx = (*head_idx + 1) % ring_size;

		/* Don't feed more than max_read_entries */
		cnt++;
		if (cnt >= max_read_entries)
			break;
	}
	spin_unlock_irq(ring_lock);

	return cnt;
}
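
/*
 * Usage sketch (hypothetical caller): dump up to 64 entries into a
 * page-sized buffer, or log everything to kmsg when no buffer exists:
 *
 *	cnt = lpfc_rx_monitor_report(phba, phba->rx_monitor, buf,
 *				     PAGE_SIZE, 64);
 *	lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
 *			       LPFC_MAX_RXMONITOR_ENTRY);
 */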

/**
 * lpfc_cmf_setup - Initialize CMF and MI support
 * @phba: Pointer to HBA context object.
 *
 * This is called from HBA setup during driver load or when the HBA
 * comes online. This does all the initialization to support CMF and MI.
 **/
static int
lpfc_cmf_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_pc_sli4_params *sli4_params;
	int rc, cmf, mi_ver;

	rc = lpfc_sli4_refresh_params(phba);
	if (unlikely(rc))
		return rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	sli4_params = &phba->sli4_hba.pc_sli4_params;

	/* Always try to enable MI feature if we can */
	if (sli4_params->mi_ver) {
		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
				&mboxq->u.mqe.un.set_feature);

		if (rc == MBX_SUCCESS) {
			if (mi_ver) {
				lpfc_printf_log(phba,
						KERN_WARNING, LOG_CGN_MGMT,
						"6215 MI is enabled\n");
				sli4_params->mi_ver = mi_ver;
			} else {
				lpfc_printf_log(phba,
						KERN_WARNING, LOG_CGN_MGMT,
						"6338 MI is disabled\n");
				sli4_params->mi_ver = 0;
			}
		} else {
			/* mi_ver is already set from GET_SLI4_PARAMETERS */
			lpfc_printf_log(phba, KERN_INFO,
					LOG_CGN_MGMT | LOG_INIT,
					"6245 Enable MI Mailbox x%x (x%x/x%x) "
					"failed, rc:x%x mi:x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					lpfc_sli_config_mbox_subsys_get
						(phba, mboxq),
					lpfc_sli_config_mbox_opcode_get
						(phba, mboxq),
					rc, sli4_params->mi_ver);
		}
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
				"6217 MI is disabled\n");
	}

	/* Ensure FDMI is enabled for MI if enable_mi is set */
	if (sli4_params->mi_ver)
		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;

	/* Always try to enable CMF feature if we can */
	if (sli4_params->cmf) {
		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		cmf = bf_get(lpfc_mbx_set_feature_cmf,
			     &mboxq->u.mqe.un.set_feature);
		if (rc == MBX_SUCCESS && cmf) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
					"6218 CMF is enabled: mode %d\n",
					phba->cmf_active_mode);
		} else {
			lpfc_printf_log(phba, KERN_WARNING,
					LOG_CGN_MGMT | LOG_INIT,
					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
					"failed, rc:x%x dd:x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					lpfc_sli_config_mbox_subsys_get
						(phba, mboxq),
					lpfc_sli_config_mbox_opcode_get
						(phba, mboxq),
					rc, cmf);
			sli4_params->cmf = 0;
			phba->cmf_active_mode = LPFC_CFG_OFF;
			goto no_cmf;
		}

		/* Allocate Congestion Information Buffer */
		if (!phba->cgn_i) {
			mp = kmalloc(sizeof(*mp), GFP_KERNEL);
			if (mp)
				mp->virt = dma_alloc_coherent
						(&phba->pcidev->dev,
						 sizeof(struct lpfc_cgn_info),
						 &mp->phys, GFP_KERNEL);
			if (!mp || !mp->virt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2640 Failed to alloc memory "
						"for Congestion Info\n");
				kfree(mp);
				sli4_params->cmf = 0;
				phba->cmf_active_mode = LPFC_CFG_OFF;
				goto no_cmf;
			}
			phba->cgn_i = mp;

			/* initialize congestion buffer info */
			lpfc_init_congestion_buf(phba);
			lpfc_init_congestion_stat(phba);

			/* Zero out Congestion Signal counters */
			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
		}

		rc = lpfc_sli4_cgn_params_read(phba);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
					"6242 Error reading Cgn Params (%d)\n",
					rc);
			/* Ensure CGN Mode is off */
			sli4_params->cmf = 0;
		} else if (!rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
					"6243 CGN Event empty object.\n");
			/* Ensure CGN Mode is off */
			sli4_params->cmf = 0;
		}
	} else {
no_cmf:
		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
				"6220 CMF is disabled\n");
	}

	/* Only register congestion buffer with firmware if BOTH
	 * CMF and E2E are enabled.
	 */
	if (sli4_params->cmf && sli4_params->mi_ver) {
		rc = lpfc_reg_congestion_buf(phba);
		if (rc) {
			dma_free_coherent(&phba->pcidev->dev,
					  sizeof(struct lpfc_cgn_info),
					  phba->cgn_i->virt, phba->cgn_i->phys);
			kfree(phba->cgn_i);
			phba->cgn_i = NULL;
			/* Ensure CGN Mode is off */
			phba->cmf_active_mode = LPFC_CFG_OFF;
			sli4_params->cmf = 0;
			return 0;
		}
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6470 Setup MI version %d CMF %d mode %d\n",
			sli4_params->mi_ver, sli4_params->cmf,
			phba->cmf_active_mode);

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Initialize atomic counters */
	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
	atomic_set(&phba->cgn_sync_warn_cnt, 0);
	atomic_set(&phba->cgn_driver_evt_cnt, 0);
	atomic_set(&phba->cgn_latency_evt_cnt, 0);
	atomic64_set(&phba->cgn_latency_evt, 0);

	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;

	/* Allocate RX Monitor Buffer */
	if (!phba->rx_monitor) {
		phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
					   GFP_KERNEL);

		if (!phba->rx_monitor) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2644 Failed to alloc memory "
					"for RX Monitor Buffer\n");
			return -ENOMEM;
		}

		/* Instruct the rx_monitor object to instantiate its ring */
		if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
						LPFC_MAX_RXMONITOR_ENTRY)) {
			kfree(phba->rx_monitor);
			phba->rx_monitor = NULL;
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2645 Failed to alloc memory "
					"for RX Monitor's Ring\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int
lpfc_set_host_tm(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t len, rc;
	struct timespec64 cur_time;
	struct tm broken;
	uint32_t month, day, year;
	uint32_t hour, minute, second;
	struct lpfc_mbx_set_host_date_time *tm;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
	mboxq->u.mqe.un.set_host_data.param_len =
			sizeof(struct lpfc_mbx_set_host_date_time);
	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
	ktime_get_real_ts64(&cur_time);
	time64_to_tm(cur_time.tv_sec, 0, &broken);
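	/* struct tm is 0-based for months and 1900-based for years, while
	 * the mailbox wants months 1-12 and a two-digit year relative to
	 * 2000; hence tm_mon + 1 and tm_year - 100 below. For example,
	 * 2024-03-05 arrives as tm_year = 124, tm_mon = 2 and is encoded
	 * as year = 24, month = 3.
	 */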
	month = broken.tm_mon + 1;
	day = broken.tm_mday;
	year = broken.tm_year - 100;
	hour = broken.tm_hour;
	minute = broken.tm_min;
	second = broken.tm_sec;
	bf_set(lpfc_mbx_set_host_month, tm, month);
	bf_set(lpfc_mbx_set_host_day, tm, day);
	bf_set(lpfc_mbx_set_host_year, tm, year);
	bf_set(lpfc_mbx_set_host_hour, tm, hour);
	bf_set(lpfc_mbx_set_host_min, tm, minute);
	bf_set(lpfc_mbx_set_host_sec, tm, second);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. It
 * is called by the HBA initialization code, the HBA reset code and the
 * HBA error attention handler code. The caller is not required to hold
 * any locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len, dd;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;
	u32 flg;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		flg = phba->sli.sli_flag;
		spin_unlock_irq(&phba->hbalock);
		/* Allow a little time after setting SLI_ACTIVE for any polled
		 * MBX commands to complete via BSG.
		 */
		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
			msleep(20);
			spin_lock_irq(&phba->hbalock);
			flg = phba->sli.sli_flag;
			spin_unlock_irq(&phba->hbalock);
		}
	}
	clear_bit(HBA_SETUP, &phba->hba_flag);

	lpfc_sli4_dip(phba);

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		set_bit(HBA_FCOE_MODE, &phba->hba_flag);
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
	    LPFC_DCBX_CEE_MODE)
		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
	else
		clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);

	clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev,
			test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	rc = lpfc_set_host_tm(phba);
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"6468 Set host date / time: Status x%x:\n", rc);

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	rc = lpfc_sli4_get_ctl_attr(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"8351 Successful retrieving SLI4 device "
				"CTL ATTR\n");

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * This is because the first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Disable VMID if app header is not supported */
	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
						  &mqe->un.req_ftrs))) {
		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
		phba->cfg_vmid_app_header = 0;
		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
				"1242 vmid feature not supported\n");
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver. This is not a fatal error.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Always try to enable dual dump feature if we can */
	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6448 Dual Dump is enabled\n");
	else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x dd:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(
					phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(
					phba, mboxq),
				rc, dd);
8737 /*
8738	 * Allocate all resources (xri, rpi, vpi, vfi) now. Subsequent
8739	 * calls depend on these resources to complete port setup.
8740 */
8741 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8742 if (rc) {
8743 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8744 "2920 Failed to alloc Resource IDs "
8745 "rc = x%x\n", rc);
8746 goto out_free_mbox;
8747 }
8748
8749 lpfc_set_host_data(phba, mboxq);
8750
8751 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8752 if (rc) {
8753 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8754 "2134 Failed to set host os driver version %x",
8755 rc);
8756 }
8757
8758 /* Read the port's service parameters. */
8759 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8760 if (rc) {
8761 phba->link_state = LPFC_HBA_ERROR;
8762 rc = -ENOMEM;
8763 goto out_free_mbox;
8764 }
8765
8766 mboxq->vport = vport;
8767 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8768 mp = mboxq->ctx_buf;
8769 if (rc == MBX_SUCCESS) {
8770 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8771 rc = 0;
8772 }
8773
8774 /*
8775 * This memory was allocated by the lpfc_read_sparam routine but is
8776	 * no longer needed. It is released and ctx_buf set to NULL to prevent
8777 * unintended pointer access as the mbox is reused.
8778 */
8779 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8780 kfree(mp);
8781 mboxq->ctx_buf = NULL;
8782 if (unlikely(rc)) {
8783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8784 "0382 READ_SPARAM command failed "
8785 "status %d, mbxStatus x%x\n",
8786 rc, bf_get(lpfc_mqe_status, mqe));
8787 phba->link_state = LPFC_HBA_ERROR;
8788 rc = -EIO;
8789 goto out_free_mbox;
8790 }
8791
8792 lpfc_update_vport_wwn(vport);
8793
8794 /* Update the fc_host data structures with new wwn. */
8795 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8796 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8797
8798 /* Create all the SLI4 queues */
8799 rc = lpfc_sli4_queue_create(phba);
8800 if (rc) {
8801 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8802 "3089 Failed to allocate queues\n");
8803 rc = -ENODEV;
8804 goto out_free_mbox;
8805 }
8806 /* Set up all the queues to the device */
8807 rc = lpfc_sli4_queue_setup(phba);
8808 if (unlikely(rc)) {
8809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8810				"0381 Error %d during queue setup.\n", rc);
8811 goto out_stop_timers;
8812 }
8813 /* Initialize the driver internal SLI layer lists. */
8814 lpfc_sli4_setup(phba);
8815 lpfc_sli4_queue_init(phba);
8816
8817 /* update host els xri-sgl sizes and mappings */
8818 rc = lpfc_sli4_els_sgl_update(phba);
8819 if (unlikely(rc)) {
8820 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8821 "1400 Failed to update xri-sgl size and "
8822 "mapping: %d\n", rc);
8823 goto out_destroy_queue;
8824 }
8825
8826 /* register the els sgl pool to the port */
8827 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8828 phba->sli4_hba.els_xri_cnt);
8829 if (unlikely(rc < 0)) {
8830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8831 "0582 Error %d during els sgl post "
8832 "operation\n", rc);
8833 rc = -ENODEV;
8834 goto out_destroy_queue;
8835 }
8836 phba->sli4_hba.els_xri_cnt = rc;
8837
8838 if (phba->nvmet_support) {
8839 /* update host nvmet xri-sgl sizes and mappings */
8840 rc = lpfc_sli4_nvmet_sgl_update(phba);
8841 if (unlikely(rc)) {
8842 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8843 "6308 Failed to update nvmet-sgl size "
8844 "and mapping: %d\n", rc);
8845 goto out_destroy_queue;
8846 }
8847
8848 /* register the nvmet sgl pool to the port */
8849 rc = lpfc_sli4_repost_sgl_list(
8850 phba,
8851 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8852 phba->sli4_hba.nvmet_xri_cnt);
8853 if (unlikely(rc < 0)) {
8854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8855 "3117 Error %d during nvmet "
8856 "sgl post\n", rc);
8857 rc = -ENODEV;
8858 goto out_destroy_queue;
8859 }
8860 phba->sli4_hba.nvmet_xri_cnt = rc;
8861
8862 /* We allocate an iocbq for every receive context SGL.
8863 * The additional allocation is for abort and ls handling.
8864 */
8865 cnt = phba->sli4_hba.nvmet_xri_cnt +
8866 phba->sli4_hba.max_cfg_param.max_xri;
8867 } else {
8868 /* update host common xri-sgl sizes and mappings */
8869 rc = lpfc_sli4_io_sgl_update(phba);
8870 if (unlikely(rc)) {
8871 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8872 "6082 Failed to update nvme-sgl size "
8873 "and mapping: %d\n", rc);
8874 goto out_destroy_queue;
8875 }
8876
8877 /* register the allocated common sgl pool to the port */
8878 rc = lpfc_sli4_repost_io_sgl_list(phba);
8879 if (unlikely(rc)) {
8880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8881 "6116 Error %d during nvme sgl post "
8882 "operation\n", rc);
8883 /* Some NVME buffers were moved to abort nvme list */
8884 /* A pci function reset will repost them */
8885 rc = -ENODEV;
8886 goto out_destroy_queue;
8887 }
8888 /* Each lpfc_io_buf job structure has an iocbq element.
8889 * This cnt provides for abort, els, ct and ls requests.
8890 */
8891 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8892 }
8893
8894 if (!phba->sli.iocbq_lookup) {
8895 /* Initialize and populate the iocb list per host */
8896 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8897 "2821 initialize iocb list with %d entries\n",
8898 cnt);
8899 rc = lpfc_init_iocb_list(phba, cnt);
8900 if (rc) {
8901 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8902 "1413 Failed to init iocb list.\n");
8903 goto out_destroy_queue;
8904 }
8905 }
8906
8907 if (phba->nvmet_support)
8908 lpfc_nvmet_create_targetport(phba);
8909
8910 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8911 /* Post initial buffers to all RQs created */
8912 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8913 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8914 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8915 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8916 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8917 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8918 rqbp->buffer_count = 0;
8919
8920 lpfc_post_rq_buffer(
8921 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8922 phba->sli4_hba.nvmet_mrq_data[i],
8923 phba->cfg_nvmet_mrq_post, i);
8924 }
8925 }
8926
8927 /* Post the rpi header region to the device. */
8928 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8929 if (unlikely(rc)) {
8930 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8931 "0393 Error %d during rpi post operation\n",
8932 rc);
8933 rc = -ENODEV;
8934 goto out_free_iocblist;
8935 }
8936 lpfc_sli4_node_prep(phba);
8937
8938 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
8939 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8940 /*
8941 * The FC Port needs to register FCFI (index 0)
8942 */
8943 lpfc_reg_fcfi(phba, mboxq);
8944 mboxq->vport = phba->pport;
8945 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8946 if (rc != MBX_SUCCESS)
8947 goto out_unset_queue;
8948 rc = 0;
8949 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8950 &mboxq->u.mqe.un.reg_fcfi);
8951 } else {
8952 /* We are a NVME Target mode with MRQ > 1 */
8953
8954 /* First register the FCFI */
8955 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8956 mboxq->vport = phba->pport;
8957 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8958 if (rc != MBX_SUCCESS)
8959 goto out_unset_queue;
8960 rc = 0;
8961 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8962 &mboxq->u.mqe.un.reg_fcfi_mrq);
8963
8964 /* Next register the MRQs */
8965 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8966 mboxq->vport = phba->pport;
8967 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8968 if (rc != MBX_SUCCESS)
8969 goto out_unset_queue;
8970 rc = 0;
8971 }
8972 /* Check if the port is configured to be disabled */
8973 lpfc_sli_read_link_ste(phba);
8974 }
8975
8976 /* Don't post more new bufs if repost already recovered
8977 * the nvme sgls.
8978 */
8979 if (phba->nvmet_support == 0) {
8980 if (phba->sli4_hba.io_xri_cnt == 0) {
8981 len = lpfc_new_io_buf(
8982 phba, phba->sli4_hba.io_xri_max);
8983 if (len == 0) {
8984 rc = -ENOMEM;
8985 goto out_unset_queue;
8986 }
8987
8988 if (phba->cfg_xri_rebalancing)
8989 lpfc_create_multixri_pools(phba);
8990 }
8991 } else {
8992 phba->cfg_xri_rebalancing = 0;
8993 }
8994
8995 /* Allow asynchronous mailbox command to go through */
8996 spin_lock_irq(&phba->hbalock);
8997 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8998 spin_unlock_irq(&phba->hbalock);
8999
9000 /* Post receive buffers to the device */
9001 lpfc_sli4_rb_setup(phba);
9002
9003 /* Reset HBA FCF states after HBA reset */
9004 phba->fcf.fcf_flag = 0;
9005 phba->fcf.current_rec.flag = 0;
9006
9007 /* Start the ELS watchdog timer */
9008 mod_timer(&vport->els_tmofunc,
9009 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9010
9011 /* Start heart beat timer */
9012 mod_timer(&phba->hb_tmofunc,
9013 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9014 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
9015 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
9016 phba->last_completion_time = jiffies;
9017
9018 /* start eq_delay heartbeat */
9019 if (phba->cfg_auto_imax)
9020 queue_delayed_work(phba->wq, &phba->eq_delay_work,
9021 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9022
9023 /* start per phba idle_stat_delay heartbeat */
9024 lpfc_init_idle_stat_hb(phba);
9025
9026 /* Start error attention (ERATT) polling timer */
9027 mod_timer(&phba->eratt_poll,
9028 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9029
9030 /*
9031 * The port is ready, set the host's link state to LINK_DOWN
9032 * in preparation for link interrupts.
9033 */
9034 spin_lock_irq(&phba->hbalock);
9035 phba->link_state = LPFC_LINK_DOWN;
9036
9037 /* Check if physical ports are trunked */
9038 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9039 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9040 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9041 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9042 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9043 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9044 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9045 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9046 spin_unlock_irq(&phba->hbalock);
9047
9048 /* Arm the CQs and then EQs on device */
9049 lpfc_sli4_arm_cqeq_intr(phba);
9050
9051 /* Indicate device interrupt mode */
9052 phba->sli4_hba.intr_enable = 1;
9053
9054 /* Setup CMF after HBA is initialized */
9055 lpfc_cmf_setup(phba);
9056
9057 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
9058 test_bit(LINK_DISABLED, &phba->hba_flag)) {
9059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9060 "3103 Adapter Link is disabled.\n");
9061 lpfc_down_link(phba, mboxq);
9062 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9063 if (rc != MBX_SUCCESS) {
9064 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9065 "3104 Adapter failed to issue "
9066 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
9067 goto out_io_buff_free;
9068 }
9069 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9070 /* don't perform init_link on SLI4 FC port loopback test */
9071 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9072 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9073 if (rc)
9074 goto out_io_buff_free;
9075 }
9076 }
9077 mempool_free(mboxq, phba->mbox_mem_pool);
9078
9079 /* Enable RAS FW log support */
9080 lpfc_sli4_ras_setup(phba);
9081
9082 set_bit(HBA_SETUP, &phba->hba_flag);
9083 return rc;
9084
9085out_io_buff_free:
9086 /* Free allocated IO Buffers */
9087 lpfc_io_free(phba);
9088out_unset_queue:
9089 /* Unset all the queues set up in this routine when error out */
9090 lpfc_sli4_queue_unset(phba);
9091out_free_iocblist:
9092 lpfc_free_iocb_list(phba);
9093out_destroy_queue:
9094 lpfc_sli4_queue_destroy(phba);
9095out_stop_timers:
9096 lpfc_stop_hba_timers(phba);
9097out_free_mbox:
9098 mempool_free(mboxq, phba->mbox_mem_pool);
9099 return rc;
9100}
9101
9102/**
9103 * lpfc_mbox_timeout - Timeout call back function for mbox timer
9104 * @t: Context to fetch pointer to hba structure from.
9105 *
9106 * This is the callback function for the mailbox timer. The mailbox
9107 * timer is armed when a new mailbox command is issued and the timer
9108 * is deleted when the mailbox completes. The function is called by
9109 * the kernel timer code when a mailbox does not complete within the
9110 * expected time. This function wakes up the worker thread to
9111 * process the mailbox timeout and returns. All the processing is
9112 * done by the worker thread function lpfc_mbox_timeout_handler.
9113 **/
9114void
9115lpfc_mbox_timeout(struct timer_list *t)
9116{
9117 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
9118 unsigned long iflag;
9119 uint32_t tmo_posted;
9120
9121 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9122 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9123 if (!tmo_posted)
9124 phba->pport->work_port_events |= WORKER_MBOX_TMO;
9125 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9126
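	/* Wake the worker only if this call posted the event; if tmo_posted
	 * was already set, a wakeup is already in flight.
	 */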
9127 if (!tmo_posted)
9128 lpfc_worker_wake_up(phba);
9129 return;
9130}
9131
9132/**
9133 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9134 * are pending
9135 * @phba: Pointer to HBA context object.
9136 *
9137 * This function checks if any mailbox completions are present on the mailbox
9138 * completion queue.
9139 **/
9140static bool
9141lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9142{
9144 uint32_t idx;
9145 struct lpfc_queue *mcq;
9146 struct lpfc_mcqe *mcqe;
9147 bool pending_completions = false;
9148 uint8_t qe_valid;
9149
9150 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9151 return false;
9152
9153 /* Check for completions on mailbox completion queue */
9154
9155 mcq = phba->sli4_hba.mbx_cq;
9156 idx = mcq->hba_index;
9157 qe_valid = mcq->qe_valid;
9158 while (bf_get_le32(lpfc_cqe_valid,
9159 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9160 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9161 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9162 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9163 pending_completions = true;
9164 break;
9165 }
9166 idx = (idx + 1) % mcq->entry_count;
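		/* Stop once the scan wraps back to the starting entry */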
9167 if (mcq->hba_index == idx)
9168 break;
9169
9170 /* if the index wrapped around, toggle the valid bit */
9171 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9172 qe_valid = (qe_valid) ? 0 : 1;
9173 }
9174 return pending_completions;
9176}
9177
9178/**
9179 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9180 * that were missed.
9181 * @phba: Pointer to HBA context object.
9182 *
9183 * For SLI4, it is possible to miss an interrupt. As such, mbox completions
9184 * may be missed, causing erroneous mailbox timeouts to occur. This function
9185 * checks to see if mbox completions are on the mailbox completion queue
9186 * and will process all the completions associated with the eq for the
9187 * mailbox completion queue.
9188 **/
9189static bool
9190lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9191{
9192 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9193 uint32_t eqidx;
9194 struct lpfc_queue *fpeq = NULL;
9195 struct lpfc_queue *eq;
9196 bool mbox_pending;
9197
9198 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9199 return false;
9200
9201 /* Find the EQ associated with the mbox CQ */
9202 if (sli4_hba->hdwq) {
9203 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9204 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9205 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9206 fpeq = eq;
9207 break;
9208 }
9209 }
9210 }
9211 if (!fpeq)
9212 return false;
9213
9214 /* Turn off interrupts from this EQ */
9215
9216 sli4_hba->sli4_eq_clr_intr(fpeq);
9217
9218 /* Check to see if a mbox completion is pending */
9219
9220 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9221
9222 /*
9223 * If a mbox completion is pending, process all the events on EQ
9224 * associated with the mbox completion queue (this could include
9225 * mailbox commands, async events, els commands, receive queue data
9226 * and fcp commands)
9227 */
9228
9229 if (mbox_pending)
9230 /* process and rearm the EQ */
9231 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
9232 LPFC_QUEUE_WORK);
9233 else
9234 /* Always clear and re-arm the EQ */
9235 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9236
9237 return mbox_pending;
9239}
9240
9241/**
9242 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9243 * @phba: Pointer to HBA context object.
9244 *
9245 * This function is called from worker thread when a mailbox command times out.
9246 * The caller is not required to hold any locks. This function will reset the
9247 * HBA and recover all the pending commands.
9248 **/
9249void
9250lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9251{
9252 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9253 MAILBOX_t *mb = NULL;
9254
9255 struct lpfc_sli *psli = &phba->sli;
9256
9257 /* If the mailbox completed, process the completion */
9258 lpfc_sli4_process_missed_mbox_completions(phba);
9259
9260 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9261 return;
9262
9263 if (pmbox != NULL)
9264 mb = &pmbox->u.mb;
9265 /* Check the pmbox pointer first. There is a race condition
9266 * between the mbox timeout handler getting executed in the
9267 * worklist and the mailbox actually completing. When this
9268 * race condition occurs, the mbox_active will be NULL.
9269 */
9270 spin_lock_irq(&phba->hbalock);
9271 if (pmbox == NULL) {
9272 lpfc_printf_log(phba, KERN_WARNING,
9273 LOG_MBOX | LOG_SLI,
9274 "0353 Active Mailbox cleared - mailbox timeout "
9275 "exiting\n");
9276 spin_unlock_irq(&phba->hbalock);
9277 return;
9278 }
9279
9280 /* Mbox cmd <mbxCommand> timeout */
9281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9282 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9283 mb->mbxCommand,
9284 phba->pport->port_state,
9285 phba->sli.sli_flag,
9286 phba->sli.mbox_active);
9287 spin_unlock_irq(&phba->hbalock);
9288
9289 /* Setting state unknown so lpfc_sli_abort_iocb_ring
9290 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9291 * it to fail all outstanding SCSI IO.
9292 */
9293 set_bit(MBX_TMO_ERR, &phba->bit_flags);
9294 spin_lock_irq(&phba->pport->work_port_lock);
9295 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9296 spin_unlock_irq(&phba->pport->work_port_lock);
9297 spin_lock_irq(&phba->hbalock);
9298 phba->link_state = LPFC_LINK_UNKNOWN;
9299 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9300 spin_unlock_irq(&phba->hbalock);
9301
9302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9303 "0345 Resetting board due to mailbox timeout\n");
9304
9305 /* Reset the HBA device */
9306 lpfc_reset_hba(phba);
9307}
9308
9309/**
9310 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9311 * @phba: Pointer to HBA context object.
9312 * @pmbox: Pointer to mailbox object.
9313 * @flag: Flag indicating how the mailbox need to be processed.
9314 *
9315 * This function is called by discovery code and HBA management code
9316 * to submit a mailbox command to firmware with SLI-3 interface spec. This
9317 * function gets the hbalock to protect the data structures.
9318 * The mailbox command can be submitted in polling mode, in which case
9319 * this function will wait in a polling loop for the completion of the
9320 * mailbox.
9321 * If the mailbox is submitted in no_wait mode (not polling), the
9322 * function will submit the command and return immediately without waiting
9323 * for the mailbox completion. The no_wait mode is supported only when the
9324 * HBA is in SLI2/SLI3 mode - interrupts are enabled.
9325 * The SLI interface allows only one mailbox pending at a time. If the
9326 * mailbox is issued in polling mode and there is already a mailbox
9327 * pending, then the function will return an error. If the mailbox is issued
9328 * in NO_WAIT mode and there is a mailbox pending already, the function
9329 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9330 * The SLI layer owns the mailbox object until the completion of the mailbox
9331 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9332 * return codes the caller owns the mailbox command after the return of
9333 * the function.
9334 **/
9335static int
9336lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9337 uint32_t flag)
9338{
9339 MAILBOX_t *mbx;
9340 struct lpfc_sli *psli = &phba->sli;
9341 uint32_t status, evtctr;
9342 uint32_t ha_copy, hc_copy;
9343 int i;
9344 unsigned long timeout;
9345 unsigned long drvr_flag = 0;
9346 uint32_t word0, ldata;
9347 void __iomem *to_slim;
9348 int processing_queue = 0;
9349
9350 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9351 if (!pmbox) {
9352 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9353 /* processing mbox queue from intr_handler */
9354 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9355 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9356 return MBX_SUCCESS;
9357 }
9358 processing_queue = 1;
9359 pmbox = lpfc_mbox_get(phba);
9360 if (!pmbox) {
9361 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9362 return MBX_SUCCESS;
9363 }
9364 }
9365
9366 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9367 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9368		if (!pmbox->vport) {
9369 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9370 lpfc_printf_log(phba, KERN_ERR,
9371 LOG_MBOX | LOG_VPORT,
9372 "1806 Mbox x%x failed. No vport\n",
9373 pmbox->u.mb.mbxCommand);
9374 dump_stack();
9375 goto out_not_finished;
9376 }
9377 }
9378
9379 /* If the PCI channel is in offline state, do not post mbox. */
9380 if (unlikely(pci_channel_offline(phba->pcidev))) {
9381 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9382 goto out_not_finished;
9383 }
9384
9385 /* If HBA has a deferred error attention, fail the iocb. */
9386 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
9387 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9388 goto out_not_finished;
9389 }
9390
9391 psli = &phba->sli;
9392
9393 mbx = &pmbox->u.mb;
9394 status = MBX_SUCCESS;
9395
9396 if (phba->link_state == LPFC_HBA_ERROR) {
9397 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9398
9399 /* Mbox command <mbxCommand> cannot issue */
9400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9401 "(%d):0311 Mailbox command x%x cannot "
9402 "issue Data: x%x x%x\n",
9403 pmbox->vport ? pmbox->vport->vpi : 0,
9404 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9405 goto out_not_finished;
9406 }
9407
9408 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9409 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9410 !(hc_copy & HC_MBINT_ENA)) {
9411 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9412 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9413 "(%d):2528 Mailbox command x%x cannot "
9414 "issue Data: x%x x%x\n",
9415 pmbox->vport ? pmbox->vport->vpi : 0,
9416 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9417 goto out_not_finished;
9418 }
9419 }
9420
9421 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9422 /* Polling for a mbox command when another one is already active
9423 * is not allowed in SLI. Also, the driver must have established
9424 * SLI2 mode to queue and process multiple mbox commands.
9425 */
9426
9427 if (flag & MBX_POLL) {
9428 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9429
9430 /* Mbox command <mbxCommand> cannot issue */
9431 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9432 "(%d):2529 Mailbox command x%x "
9433 "cannot issue Data: x%x x%x\n",
9434 pmbox->vport ? pmbox->vport->vpi : 0,
9435 pmbox->u.mb.mbxCommand,
9436 psli->sli_flag, flag);
9437 goto out_not_finished;
9438 }
9439
9440 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9441 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9442 /* Mbox command <mbxCommand> cannot issue */
9443 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9444 "(%d):2530 Mailbox command x%x "
9445 "cannot issue Data: x%x x%x\n",
9446 pmbox->vport ? pmbox->vport->vpi : 0,
9447 pmbox->u.mb.mbxCommand,
9448 psli->sli_flag, flag);
9449 goto out_not_finished;
9450 }
9451
9452 /* Another mailbox command is still being processed, queue this
9453 * command to be processed later.
9454 */
9455 lpfc_mbox_put(phba, pmbox);
9456
9457 /* Mbox cmd issue - BUSY */
9458 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9459 "(%d):0308 Mbox cmd issue - BUSY Data: "
9460 "x%x x%x x%x x%x\n",
9461 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9462 mbx->mbxCommand,
9463 phba->pport ? phba->pport->port_state : 0xff,
9464 psli->sli_flag, flag);
9465
9466 psli->slistat.mbox_busy++;
9467 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9468
9469 if (pmbox->vport) {
9470 lpfc_debugfs_disc_trc(pmbox->vport,
9471 LPFC_DISC_TRC_MBOX_VPORT,
9472 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9473 (uint32_t)mbx->mbxCommand,
9474 mbx->un.varWords[0], mbx->un.varWords[1]);
9475		} else {
9477 lpfc_debugfs_disc_trc(phba->pport,
9478 LPFC_DISC_TRC_MBOX,
9479 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9480 (uint32_t)mbx->mbxCommand,
9481 mbx->un.varWords[0], mbx->un.varWords[1]);
9482 }
9483
9484 return MBX_BUSY;
9485 }
9486
9487 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9488
9489 /* If we are not polling, we MUST be in SLI2 mode */
9490 if (flag != MBX_POLL) {
9491 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9492 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9493 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9494 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9495 /* Mbox command <mbxCommand> cannot issue */
9496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9497 "(%d):2531 Mailbox command x%x "
9498 "cannot issue Data: x%x x%x\n",
9499 pmbox->vport ? pmbox->vport->vpi : 0,
9500 pmbox->u.mb.mbxCommand,
9501 psli->sli_flag, flag);
9502 goto out_not_finished;
9503 }
9504 /* timeout active mbox command */
9505 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9506 1000);
9507 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9508 }
9509
9510 /* Mailbox cmd <cmd> issue */
9511 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9512 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9513 "x%x\n",
9514 pmbox->vport ? pmbox->vport->vpi : 0,
9515 mbx->mbxCommand,
9516 phba->pport ? phba->pport->port_state : 0xff,
9517 psli->sli_flag, flag);
9518
9519 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9520 if (pmbox->vport) {
9521 lpfc_debugfs_disc_trc(pmbox->vport,
9522 LPFC_DISC_TRC_MBOX_VPORT,
9523 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9524 (uint32_t)mbx->mbxCommand,
9525 mbx->un.varWords[0], mbx->un.varWords[1]);
9526		} else {
9528 lpfc_debugfs_disc_trc(phba->pport,
9529 LPFC_DISC_TRC_MBOX,
9530 "MBOX Send: cmd:x%x mb:x%x x%x",
9531 (uint32_t)mbx->mbxCommand,
9532 mbx->un.varWords[0], mbx->un.varWords[1]);
9533 }
9534 }
9535
9536 psli->slistat.mbox_cmd++;
9537 evtctr = psli->slistat.mbox_event;
9538
9539 /* next set own bit for the adapter and copy over command word */
9540 mbx->mbxOwner = OWN_CHIP;
9541
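	/* With LPFC_SLI_ACTIVE (SLI2/SLI3 established) the command is staged
	 * in the host-memory mailbox; otherwise it must be written directly
	 * into the adapter's on-board SLIM.
	 */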
9542 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9543 /* Populate mbox extension offset word. */
9544 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9545 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9546 = (uint8_t *)phba->mbox_ext
9547 - (uint8_t *)phba->mbox;
9548 }
9549
9550 /* Copy the mailbox extension data */
9551 if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
9552 lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
9553 (uint8_t *)phba->mbox_ext,
9554 pmbox->in_ext_byte_len);
9555 }
9556 /* Copy command data to host SLIM area */
9557 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9558 } else {
9559 /* Populate mbox extension offset word. */
9560 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9561 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9562 = MAILBOX_HBA_EXT_OFFSET;
9563
9564 /* Copy the mailbox extension data */
9565 if (pmbox->in_ext_byte_len && pmbox->ext_buf)
9566 lpfc_memcpy_to_slim(phba->MBslimaddr +
9567 MAILBOX_HBA_EXT_OFFSET,
9568 pmbox->ext_buf, pmbox->in_ext_byte_len);
9569
9570 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9571 /* copy command data into host mbox for cmpl */
9572 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9573 MAILBOX_CMD_SIZE);
9574
9575		/* First copy mbox command data to HBA SLIM, skipping past
9576		 * the first word */
9577		to_slim = phba->MBslimaddr + sizeof(uint32_t);
9578		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9579				    MAILBOX_CMD_SIZE - sizeof(uint32_t));
9580
9581 /* Next copy over first word, with mbxOwner set */
9582 ldata = *((uint32_t *)mbx);
9583 to_slim = phba->MBslimaddr;
9584 writel(ldata, to_slim);
9585 readl(to_slim); /* flush */
9586
9587 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9588 /* switch over to host mailbox */
9589 psli->sli_flag |= LPFC_SLI_ACTIVE;
9590 }
9591
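	/* Ensure the mailbox image is fully written before the adapter is
	 * told to process it.
	 */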
9592 wmb();
9593
9594 switch (flag) {
9595 case MBX_NOWAIT:
9596 /* Set up reference to mailbox command */
9597 psli->mbox_active = pmbox;
9598 /* Interrupt board to do it */
9599 writel(CA_MBATT, phba->CAregaddr);
9600 readl(phba->CAregaddr); /* flush */
9601 /* Don't wait for it to finish, just return */
9602 break;
9603
9604 case MBX_POLL:
9605 /* Set up null reference to mailbox command */
9606 psli->mbox_active = NULL;
9607 /* Interrupt board to do it */
9608 writel(CA_MBATT, phba->CAregaddr);
9609 readl(phba->CAregaddr); /* flush */
9610
9611 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9612 /* First read mbox status word */
9613 word0 = *((uint32_t *)phba->mbox);
9614 word0 = le32_to_cpu(word0);
9615 } else {
9616 /* First read mbox status word */
9617 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9618 spin_unlock_irqrestore(&phba->hbalock,
9619 drvr_flag);
9620 goto out_not_finished;
9621 }
9622 }
9623
9624 /* Read the HBA Host Attention Register */
9625 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9626 spin_unlock_irqrestore(&phba->hbalock,
9627 drvr_flag);
9628 goto out_not_finished;
9629 }
9630 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9631 1000) + jiffies;
9632 i = 0;
9633 /* Wait for command to complete */
9634 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9635 (!(ha_copy & HA_MBATT) &&
9636 (phba->link_state > LPFC_WARM_START))) {
9637 if (time_after(jiffies, timeout)) {
9638 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9639 spin_unlock_irqrestore(&phba->hbalock,
9640 drvr_flag);
9641 goto out_not_finished;
9642 }
9643
9644			/* Check if we took a mbox interrupt while we
9645			 * were polling */
9646 if (((word0 & OWN_CHIP) != OWN_CHIP)
9647 && (evtctr != psli->slistat.mbox_event))
9648 break;
9649
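			/* After a few tight spins, drop the lock and sleep
			 * for 1ms so interrupts and other hbalock users can
			 * make progress.
			 */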
9650 if (i++ > 10) {
9651 spin_unlock_irqrestore(&phba->hbalock,
9652 drvr_flag);
9653 msleep(1);
9654 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9655 }
9656
9657 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9658 /* First copy command data */
9659 word0 = *((uint32_t *)phba->mbox);
9660 word0 = le32_to_cpu(word0);
9661 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9662 MAILBOX_t *slimmb;
9663 uint32_t slimword0;
9664 /* Check real SLIM for any errors */
9665 slimword0 = readl(phba->MBslimaddr);
9666					slimmb = (MAILBOX_t *)&slimword0;
9667 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9668 && slimmb->mbxStatus) {
9669 psli->sli_flag &=
9670 ~LPFC_SLI_ACTIVE;
9671 word0 = slimword0;
9672 }
9673 }
9674 } else {
9675 /* First copy command data */
9676 word0 = readl(phba->MBslimaddr);
9677 }
9678 /* Read the HBA Host Attention Register */
9679 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9680 spin_unlock_irqrestore(&phba->hbalock,
9681 drvr_flag);
9682 goto out_not_finished;
9683 }
9684 }
9685
9686 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9687 /* copy results back to user */
9688 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9689 MAILBOX_CMD_SIZE);
9690 /* Copy the mailbox extension data */
9691 if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9692 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9693 pmbox->ext_buf,
9694 pmbox->out_ext_byte_len);
9695 }
9696 } else {
9697 /* First copy command data */
9698 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9699 MAILBOX_CMD_SIZE);
9700 /* Copy the mailbox extension data */
9701 if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9702 lpfc_memcpy_from_slim(
9703 pmbox->ext_buf,
9704 phba->MBslimaddr +
9705 MAILBOX_HBA_EXT_OFFSET,
9706 pmbox->out_ext_byte_len);
9707 }
9708 }
9709
9710 writel(HA_MBATT, phba->HAregaddr);
9711 readl(phba->HAregaddr); /* flush */
9712
9713 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9714 status = mbx->mbxStatus;
9715 }
9716
9717 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9718 return status;
9719
9720out_not_finished:
9721 if (processing_queue) {
9722 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9723 lpfc_mbox_cmpl_put(phba, pmbox);
9724 }
9725 return MBX_NOT_FINISHED;
9726}
9727
9728/**
9729 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9730 * @phba: Pointer to HBA context object.
9731 *
9732 * The function blocks the posting of SLI4 asynchronous mailbox commands from
9733 * the driver internal pending mailbox queue. It will then try to wait out the
9734 * possible outstanding mailbox command before returning.
9735 *
9736 * Returns:
9737 * 0 - the outstanding mailbox command completed; otherwise, the wait for
9738 * the outstanding mailbox command timed out.
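 *
 * Callers that successfully block (return 0) pair this with
 * lpfc_sli4_async_mbox_unblock() once their synchronous mailbox work is
 * done, as lpfc_sli_issue_mbox_s4() does below.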
9739 **/
9740static int
9741lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9742{
9743 struct lpfc_sli *psli = &phba->sli;
9744 LPFC_MBOXQ_t *mboxq;
9745 int rc = 0;
9746 unsigned long timeout = 0;
9747 u32 sli_flag;
9748 u8 cmd, subsys, opcode;
9749
9750 /* Mark the asynchronous mailbox command posting as blocked */
9751 spin_lock_irq(&phba->hbalock);
9752 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9753 /* Determine how long we might wait for the active mailbox
9754 * command to be gracefully completed by firmware.
9755 */
9756 if (phba->sli.mbox_active)
9757 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9758 phba->sli.mbox_active) *
9759 1000) + jiffies;
9760 spin_unlock_irq(&phba->hbalock);
9761
9762 /* Make sure the mailbox is really active */
9763 if (timeout)
9764 lpfc_sli4_process_missed_mbox_completions(phba);
9765
9766 /* Wait for the outstanding mailbox command to complete */
9767 while (phba->sli.mbox_active) {
9768 /* Check active mailbox complete status every 2ms */
9769 msleep(2);
9770 if (time_after(jiffies, timeout)) {
9771 /* Timeout, mark the outstanding cmd not complete */
9772
9773 /* Sanity check sli.mbox_active has not completed or
9774 * cancelled from another context during last 2ms sleep,
9775 * so take hbalock to be sure before logging.
9776 */
9777 spin_lock_irq(&phba->hbalock);
9778 if (phba->sli.mbox_active) {
9779 mboxq = phba->sli.mbox_active;
9780 cmd = mboxq->u.mb.mbxCommand;
9781 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9782 mboxq);
9783 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9784 mboxq);
9785 sli_flag = psli->sli_flag;
9786 spin_unlock_irq(&phba->hbalock);
9787 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9788 "2352 Mailbox command x%x "
9789 "(x%x/x%x) sli_flag x%x could "
9790 "not complete\n",
9791 cmd, subsys, opcode,
9792 sli_flag);
9793 } else {
9794 spin_unlock_irq(&phba->hbalock);
9795 }
9796
9797 rc = 1;
9798 break;
9799 }
9800 }
9801
9802	/* Could not cleanly block the async mailbox command; fail it */
9803 if (rc) {
9804 spin_lock_irq(&phba->hbalock);
9805 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9806 spin_unlock_irq(&phba->hbalock);
9807 }
9808 return rc;
9809}
9810
9811/**
9812 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
9813 * @phba: Pointer to HBA context object.
9814 *
9815 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9816 * commands from the driver internal pending mailbox queue. It makes sure
9817 * that there is no outstanding mailbox command before resuming posting
9818 * asynchronous mailbox commands. If, for any reason, there is an outstanding
9819 * mailbox command, it will try to wait it out before resuming asynchronous
9820 * mailbox command posting.
9821 **/
9822static void
9823lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9824{
9825 struct lpfc_sli *psli = &phba->sli;
9826
9827 spin_lock_irq(&phba->hbalock);
9828 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9829 /* Asynchronous mailbox posting is not blocked, do nothing */
9830 spin_unlock_irq(&phba->hbalock);
9831 return;
9832 }
9833
9834	/* An outstanding synchronous mailbox command is guaranteed to be done,
9835	 * whether successful or timed out; a timed-out mailbox command is
9836	 * always removed, so just unblock posting of async mailbox commands
9837	 * and resume.
9838 */
9839 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9840 spin_unlock_irq(&phba->hbalock);
9841
9842 /* wake up worker thread to post asynchronous mailbox command */
9843 lpfc_worker_wake_up(phba);
9844}
9845
9846/**
9847 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9848 * @phba: Pointer to HBA context object.
9849 * @mboxq: Pointer to mailbox object.
9850 *
9851 * The function waits for the bootstrap mailbox register ready bit from
9852 * the port for the regular mailbox command timeout value.
9853 *
9854 * 0 - no timeout on waiting for bootstrap mailbox register ready.
9855 * MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9856 * is in an unrecoverable state.
9857 **/
9858static int
9859lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9860{
9861 uint32_t db_ready;
9862 unsigned long timeout;
9863 struct lpfc_register bmbx_reg;
9864 struct lpfc_register portstat_reg = {-1};
9865
9866 /* Sanity check - there is no point to wait if the port is in an
9867 * unrecoverable state.
9868 */
9869 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9870 LPFC_SLI_INTF_IF_TYPE_2) {
9871 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9872 &portstat_reg.word0) ||
9873 lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9874 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9875 "3858 Skipping bmbx ready because "
9876 "Port Status x%x\n",
9877 portstat_reg.word0);
9878 return MBXERR_ERROR;
9879 }
9880 }
9881
9882 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9883 * 1000) + jiffies;
9884
9885 do {
9886 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9887 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9888 if (!db_ready)
9889 mdelay(2);
9890
9891 if (time_after(jiffies, timeout))
9892 return MBXERR_ERROR;
9893 } while (!db_ready);
9894
9895 return 0;
9896}
9897
9898/**
9899 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9900 * @phba: Pointer to HBA context object.
9901 * @mboxq: Pointer to mailbox object.
9902 *
9903 * The function posts a mailbox to the port. The mailbox is expected
9904 * to be completely filled in and ready for the port to operate on it.
9905 * This routine executes a synchronous completion operation on the
9906 * mailbox by polling for its completion.
9907 *
9908 * The caller must not be holding any locks when calling this routine.
9909 *
9910 * Returns:
9911 * MBX_SUCCESS - mailbox posted successfully
9912 * Any of the MBX error values.
9913 **/
9914static int
9915lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9916{
9917 int rc = MBX_SUCCESS;
9918 unsigned long iflag;
9919 uint32_t mcqe_status;
9920 uint32_t mbx_cmnd;
9921 struct lpfc_sli *psli = &phba->sli;
9922 struct lpfc_mqe *mb = &mboxq->u.mqe;
9923 struct lpfc_bmbx_create *mbox_rgn;
9924 struct dma_address *dma_address;
9925
9926 /*
9927 * Only one mailbox can be active to the bootstrap mailbox region
9928 * at a time and there is no queueing provided.
9929 */
9930 spin_lock_irqsave(&phba->hbalock, iflag);
9931 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9932 spin_unlock_irqrestore(&phba->hbalock, iflag);
9933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9934 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9935 "cannot issue Data: x%x x%x\n",
9936 mboxq->vport ? mboxq->vport->vpi : 0,
9937 mboxq->u.mb.mbxCommand,
9938 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9939 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9940 psli->sli_flag, MBX_POLL);
9941 return MBXERR_ERROR;
9942 }
9943	/* The driver grabs the token and owns it until released */
9944 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9945 phba->sli.mbox_active = mboxq;
9946 spin_unlock_irqrestore(&phba->hbalock, iflag);
9947
9948	/* wait for the bootstrap mbox register to become ready */
9949 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9950 if (rc)
9951 goto exit;
9952 /*
9953 * Initialize the bootstrap memory region to avoid stale data areas
9954 * in the mailbox post. Then copy the caller's mailbox contents to
9955 * the bmbx mailbox region.
9956 */
9957 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9958 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9959 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9960 sizeof(struct lpfc_mqe));
9961
9962 /* Post the high mailbox dma address to the port and wait for ready. */
9963 dma_address = &phba->sli4_hba.bmbx.dma_address;
9964 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9965
9966 /* wait for bootstrap mbox register for hi-address write done */
9967 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9968 if (rc)
9969 goto exit;
9970
9971 /* Post the low mailbox dma address to the port. */
9972 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
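	/* Writing the low half completes the two-step address post; the
	 * ready poll below detects when the port has consumed the command.
	 */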
9973
9974 /* wait for bootstrap mbox register for low address write done */
9975 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9976 if (rc)
9977 goto exit;
9978
9979 /*
9980 * Read the CQ to ensure the mailbox has completed.
9981 * If so, update the mailbox status so that the upper layers
9982 * can complete the request normally.
9983 */
9984 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9985 sizeof(struct lpfc_mqe));
9986 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9987 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9988 sizeof(struct lpfc_mcqe));
9989 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9990 /*
9991 * When the CQE status indicates a failure and the mailbox status
9992 * indicates success then copy the CQE status into the mailbox status
9993 * (and prefix it with x4000).
9994 */
9995 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9996 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9997 bf_set(lpfc_mqe_status, mb,
9998 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9999 rc = MBXERR_ERROR;
10000 } else
10001 lpfc_sli4_swap_str(phba, mboxq);
10002
10003 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10004 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10005 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10006 " x%x x%x CQ: x%x x%x x%x x%x\n",
10007 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10008 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10009 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10010 bf_get(lpfc_mqe_status, mb),
10011 mb->un.mb_words[0], mb->un.mb_words[1],
10012 mb->un.mb_words[2], mb->un.mb_words[3],
10013 mb->un.mb_words[4], mb->un.mb_words[5],
10014 mb->un.mb_words[6], mb->un.mb_words[7],
10015 mb->un.mb_words[8], mb->un.mb_words[9],
10016 mb->un.mb_words[10], mb->un.mb_words[11],
10017 mb->un.mb_words[12], mboxq->mcqe.word0,
10018 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
10019 mboxq->mcqe.trailer);
10020exit:
10021	/* We are holding the token; release it under the lock */
10022 spin_lock_irqsave(&phba->hbalock, iflag);
10023 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10024 phba->sli.mbox_active = NULL;
10025 spin_unlock_irqrestore(&phba->hbalock, iflag);
10026 return rc;
10027}
10028
10029/**
10030 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10031 * @phba: Pointer to HBA context object.
10032 * @mboxq: Pointer to mailbox object.
10033 * @flag: Flag indicating how the mailbox need to be processed.
10034 *
10035 * This function is called by discovery code and HBA management code to submit
10036 * a mailbox command to firmware with SLI-4 interface spec.
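 *
 * With interrupts disabled, only MBX_POLL is supported and the command is
 * executed synchronously through the bootstrap mailbox. With interrupts
 * enabled, an MBX_POLL request first blocks async mailbox posting before
 * running synchronously, while MBX_NOWAIT requests are enqueued to the
 * driver internal FIFO for the worker thread to post.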
10037 *
10038 * Whatever the return code, the caller owns the mailbox command after
10039 * the function returns.
10040 **/
10041static int
10042lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10043 uint32_t flag)
10044{
10045 struct lpfc_sli *psli = &phba->sli;
10046 unsigned long iflags;
10047 int rc;
10048
10049	/* Dump the mailbox command at issue time if idiag dumping is set up */
10050 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10051
10052 rc = lpfc_mbox_dev_check(phba);
10053 if (unlikely(rc)) {
10054 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10055 "(%d):2544 Mailbox command x%x (x%x/x%x) "
10056 "cannot issue Data: x%x x%x\n",
10057 mboxq->vport ? mboxq->vport->vpi : 0,
10058 mboxq->u.mb.mbxCommand,
10059 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10060 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10061 psli->sli_flag, flag);
10062 goto out_not_finished;
10063 }
10064
10065 /* Detect polling mode and jump to a handler */
10066 if (!phba->sli4_hba.intr_enable) {
10067 if (flag == MBX_POLL)
10068 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10069 else
10070 rc = -EIO;
10071 if (rc != MBX_SUCCESS)
10072 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10073 "(%d):2541 Mailbox command x%x "
10074 "(x%x/x%x) failure: "
10075 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10076 "Data: x%x x%x\n",
10077 mboxq->vport ? mboxq->vport->vpi : 0,
10078 mboxq->u.mb.mbxCommand,
10079 lpfc_sli_config_mbox_subsys_get(phba,
10080 mboxq),
10081 lpfc_sli_config_mbox_opcode_get(phba,
10082 mboxq),
10083 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10084 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10085 bf_get(lpfc_mcqe_ext_status,
10086 &mboxq->mcqe),
10087 psli->sli_flag, flag);
10088 return rc;
10089 } else if (flag == MBX_POLL) {
10090 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10091 "(%d):2542 Try to issue mailbox command "
10092 "x%x (x%x/x%x) synchronously ahead of async "
10093 "mailbox command queue: x%x x%x\n",
10094 mboxq->vport ? mboxq->vport->vpi : 0,
10095 mboxq->u.mb.mbxCommand,
10096 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10097 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10098 psli->sli_flag, flag);
10099 /* Try to block the asynchronous mailbox posting */
10100 rc = lpfc_sli4_async_mbox_block(phba);
10101 if (!rc) {
10102 /* Successfully blocked, now issue sync mbox cmd */
10103 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10104 if (rc != MBX_SUCCESS)
10105 lpfc_printf_log(phba, KERN_WARNING,
10106 LOG_MBOX | LOG_SLI,
10107 "(%d):2597 Sync Mailbox command "
10108 "x%x (x%x/x%x) failure: "
10109 "mqe_sta: x%x mcqe_sta: x%x/x%x "
10110 "Data: x%x x%x\n",
10111 mboxq->vport ? mboxq->vport->vpi : 0,
10112 mboxq->u.mb.mbxCommand,
10113 lpfc_sli_config_mbox_subsys_get(phba,
10114 mboxq),
10115 lpfc_sli_config_mbox_opcode_get(phba,
10116 mboxq),
10117 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10118 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10119 bf_get(lpfc_mcqe_ext_status,
10120 &mboxq->mcqe),
10121 psli->sli_flag, flag);
10122 /* Unblock the async mailbox posting afterward */
10123 lpfc_sli4_async_mbox_unblock(phba);
10124 }
10125 return rc;
10126 }
10127
10128 /* Now, interrupt mode asynchronous mailbox command */
10129 rc = lpfc_mbox_cmd_check(phba, mboxq);
10130 if (rc) {
10131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10132 "(%d):2543 Mailbox command x%x (x%x/x%x) "
10133 "cannot issue Data: x%x x%x\n",
10134 mboxq->vport ? mboxq->vport->vpi : 0,
10135 mboxq->u.mb.mbxCommand,
10136 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10137 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10138 psli->sli_flag, flag);
10139 goto out_not_finished;
10140 }
10141
10142 /* Put the mailbox command to the driver internal FIFO */
10143 psli->slistat.mbox_busy++;
10144 spin_lock_irqsave(&phba->hbalock, iflags);
10145 lpfc_mbox_put(phba, mboxq);
10146 spin_unlock_irqrestore(&phba->hbalock, iflags);
10147 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10148 "(%d):0354 Mbox cmd issue - Enqueue Data: "
10149 "x%x (x%x/x%x) x%x x%x x%x x%x\n",
10150 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10151 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10152 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10153 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10154 mboxq->u.mb.un.varUnregLogin.rpi,
10155 phba->pport->port_state,
10156 psli->sli_flag, MBX_NOWAIT);
10157 /* Wake up worker thread to transport mailbox command from head */
10158 lpfc_worker_wake_up(phba);
10159
10160 return MBX_BUSY;
10161
10162out_not_finished:
10163 return MBX_NOT_FINISHED;
10164}
10165
10166/**
10167 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10168 * @phba: Pointer to HBA context object.
10169 *
10170 * This function is called by the worker thread to send a mailbox command to
10171 * SLI4 HBA firmware.
10172 *
10173 **/
10174int
10175lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10176{
10177 struct lpfc_sli *psli = &phba->sli;
10178 LPFC_MBOXQ_t *mboxq;
10179 int rc = MBX_SUCCESS;
10180 unsigned long iflags;
10181 struct lpfc_mqe *mqe;
10182 uint32_t mbx_cmnd;
10183
10184 /* Check interrupt mode before post async mailbox command */
10185 if (unlikely(!phba->sli4_hba.intr_enable))
10186 return MBX_NOT_FINISHED;
10187
10188 /* Check for mailbox command service token */
10189 spin_lock_irqsave(&phba->hbalock, iflags);
10190 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10191 spin_unlock_irqrestore(&phba->hbalock, iflags);
10192 return MBX_NOT_FINISHED;
10193 }
10194 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10195 spin_unlock_irqrestore(&phba->hbalock, iflags);
10196 return MBX_NOT_FINISHED;
10197 }
10198 if (unlikely(phba->sli.mbox_active)) {
10199 spin_unlock_irqrestore(&phba->hbalock, iflags);
10200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10201 "0384 There is pending active mailbox cmd\n");
10202 return MBX_NOT_FINISHED;
10203 }
10204 /* Take the mailbox command service token */
10205 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10206
10207 /* Get the next mailbox command from head of queue */
10208 mboxq = lpfc_mbox_get(phba);
10209
10210	/* If no more mailbox commands are waiting for post, we're done */
10211 if (!mboxq) {
10212 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10213 spin_unlock_irqrestore(&phba->hbalock, iflags);
10214 return MBX_SUCCESS;
10215 }
10216 phba->sli.mbox_active = mboxq;
10217 spin_unlock_irqrestore(&phba->hbalock, iflags);
10218
10219 /* Check device readiness for posting mailbox command */
10220 rc = lpfc_mbox_dev_check(phba);
10221 if (unlikely(rc))
10222 /* Driver clean routine will clean up pending mailbox */
10223 goto out_not_finished;
10224
10225 /* Prepare the mbox command to be posted */
10226 mqe = &mboxq->u.mqe;
10227 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10228
10229 /* Start timer for the mbox_tmo and log some mailbox post messages */
10230 mod_timer(&psli->mbox_tmo, (jiffies +
10231 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
10232
10233 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10234 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10235 "x%x x%x\n",
10236 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10237 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10238 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10239 phba->pport->port_state, psli->sli_flag);
10240
10241 if (mbx_cmnd != MBX_HEARTBEAT) {
10242 if (mboxq->vport) {
10243 lpfc_debugfs_disc_trc(mboxq->vport,
10244 LPFC_DISC_TRC_MBOX_VPORT,
10245 "MBOX Send vport: cmd:x%x mb:x%x x%x",
10246 mbx_cmnd, mqe->un.mb_words[0],
10247 mqe->un.mb_words[1]);
10248 } else {
10249 lpfc_debugfs_disc_trc(phba->pport,
10250 LPFC_DISC_TRC_MBOX,
10251 "MBOX Send: cmd:x%x mb:x%x x%x",
10252 mbx_cmnd, mqe->un.mb_words[0],
10253 mqe->un.mb_words[1]);
10254 }
10255 }
10256 psli->slistat.mbox_cmd++;
10257
10258 /* Post the mailbox command to the port */
10259 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10260 if (rc != MBX_SUCCESS) {
10261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10262 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10263 "cannot issue Data: x%x x%x\n",
10264 mboxq->vport ? mboxq->vport->vpi : 0,
10265 mboxq->u.mb.mbxCommand,
10266 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10267 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10268 psli->sli_flag, MBX_NOWAIT);
10269 goto out_not_finished;
10270 }
10271
10272 return rc;
10273
10274out_not_finished:
10275 spin_lock_irqsave(&phba->hbalock, iflags);
10276 if (phba->sli.mbox_active) {
10277 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10278 __lpfc_mbox_cmpl_put(phba, mboxq);
10279 /* Release the token */
10280 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10281 phba->sli.mbox_active = NULL;
10282 }
10283 spin_unlock_irqrestore(&phba->hbalock, iflags);
10284
10285 return MBX_NOT_FINISHED;
10286}
10287
10288/**
10289 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10290 * @phba: Pointer to HBA context object.
10291 * @pmbox: Pointer to mailbox object.
10292 * @flag: Flag indicating how the mailbox need to be processed.
10293 *
10294 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
10295 * the API jump table function pointer from the lpfc_hba struct.
10296 *
10297 * Whatever the return code, the caller owns the mailbox command after
10298 * the function returns.
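 *
 * A typical polled invocation, as used elsewhere in this file (sketch
 * only; allocation and error handling trimmed):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
 *	if (!rc)
 *		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);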
10299 **/
10300int
10301lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10302{
10303 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10304}
10305
10306/**
10307 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10308 * @phba: The hba struct for which this call is being executed.
10309 * @dev_grp: The HBA PCI-Device group number.
10310 *
10311 * This routine sets up the mbox interface API function jump table in @phba
10312 * struct.
10313 * Returns: 0 - success, -ENODEV - failure.
10314 **/
10315int
10316lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10317{
10319 switch (dev_grp) {
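	/* SLI-3 API set */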
10320 case LPFC_PCI_DEV_LP:
10321 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10322 phba->lpfc_sli_handle_slow_ring_event =
10323 lpfc_sli_handle_slow_ring_event_s3;
10324 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10325 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10326 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10327 break;
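	/* SLI-4 API set */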
10328 case LPFC_PCI_DEV_OC:
10329 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10330 phba->lpfc_sli_handle_slow_ring_event =
10331 lpfc_sli_handle_slow_ring_event_s4;
10332 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10333 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10334 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10335 break;
10336 default:
10337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10338 "1420 Invalid HBA PCI-device group: 0x%x\n",
10339 dev_grp);
10340 return -ENODEV;
10341 }
10342 return 0;
10343}
10344
10345/**
10346 * __lpfc_sli_ringtx_put - Add an iocb to the txq
10347 * @phba: Pointer to HBA context object.
10348 * @pring: Pointer to driver SLI ring object.
10349 * @piocb: Pointer to address of newly added command iocb.
10350 *
10351 * This function is called with hbalock held for SLI3 ports or
10352 * the ring lock held for SLI4 ports to add a command
10353 * iocb to the txq when SLI layer cannot submit the command iocb
10354 * to the ring.
10355 **/
10356void
10357__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10358 struct lpfc_iocbq *piocb)
10359{
10360 if (phba->sli_rev == LPFC_SLI_REV4)
10361 lockdep_assert_held(&pring->ring_lock);
10362 else
10363 lockdep_assert_held(&phba->hbalock);
10364 /* Insert the caller's iocb in the txq tail for later processing. */
10365 list_add_tail(&piocb->list, &pring->txq);
10366}
10367
10368/**
10369 * lpfc_sli_next_iocb - Get the next iocb in the txq
10370 * @phba: Pointer to HBA context object.
10371 * @pring: Pointer to driver SLI ring object.
10372 * @piocb: Pointer to address of newly added command iocb.
10373 *
10374 * This function is called with hbalock held before a new
10375 * iocb is submitted to the firmware. It ensures that iocbs
10376 * already waiting on the txq are flushed to the firmware
10377 * before any new iocb is submitted.
10378 * If there are iocbs in the txq which need to be submitted
10379 * to the firmware, lpfc_sli_next_iocb dequeues the first element
10380 * of the txq and returns it.
10381 * If the txq is empty, the function returns *piocb and sets
10382 * *piocb to NULL. The caller checks *piocb to determine whether
10383 * more commands remain to be issued.
10384 **/
10385static struct lpfc_iocbq *
10386lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10387 struct lpfc_iocbq **piocb)
10388{
10389	struct lpfc_iocbq *nextiocb;
10390
10391 lockdep_assert_held(&phba->hbalock);
10392
10393 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10394 if (!nextiocb) {
10395 nextiocb = *piocb;
10396 *piocb = NULL;
10397 }
10398
10399 return nextiocb;
10400}
10401
10402/**
10403 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10404 * @phba: Pointer to HBA context object.
10405 * @ring_number: SLI ring number to issue iocb on.
10406 * @piocb: Pointer to command iocb.
10407 * @flag: Flag indicating if this command can be put into txq.
10408 *
10409 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10410 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10411 * recovering from an error state, if the HBA is resetting, or if the
10412 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR. When
10413 * the link is down, this function allows only iocbs for posting buffers. This
10414 * function finds the next available slot in the command ring, posts the command
10415 * there, and writes the port attention register to request that the HBA start
10416 * processing the new iocb. If there is no slot available in the ring and
10417 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10418 * the function returns IOCB_BUSY.
10419 *
10420 * This function is called with hbalock held. The function will return success
10421 * after it successfully submits the iocb to the firmware or after adding it to
10422 * the txq.
10423 **/
10424static int
10425__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10426 struct lpfc_iocbq *piocb, uint32_t flag)
10427{
10428 struct lpfc_iocbq *nextiocb;
10429 IOCB_t *iocb;
10430 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10431
10432 lockdep_assert_held(&phba->hbalock);
10433
10434 if (piocb->cmd_cmpl && (!piocb->vport) &&
10435 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10436 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10438 "1807 IOCB x%x failed. No vport\n",
10439 piocb->iocb.ulpCommand);
10440 dump_stack();
10441 return IOCB_ERROR;
10442 }
10443
10445 /* If the PCI channel is in offline state, do not post iocbs. */
10446 if (unlikely(pci_channel_offline(phba->pcidev)))
10447 return IOCB_ERROR;
10448
10449 /* If HBA has a deferred error attention, fail the iocb. */
10450 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
10451 return IOCB_ERROR;
10452
10453 /*
10454 * We should never get an IOCB if we are in a < LINK_DOWN state
10455 */
10456 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10457 return IOCB_ERROR;
10458
10459 /*
10460	 * Check to see if we are blocking IOCB processing because of an
10461	 * outstanding event.
10462 */
10463 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10464 goto iocb_busy;
10465
10466 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10467 /*
10468 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10469 * can be issued if the link is not up.
10470 */
10471 switch (piocb->iocb.ulpCommand) {
10472 case CMD_QUE_RING_BUF_CN:
10473 case CMD_QUE_RING_BUF64_CN:
10474 /*
10475 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10476 * completion, cmd_cmpl MUST be 0.
10477 */
10478 if (piocb->cmd_cmpl)
10479 piocb->cmd_cmpl = NULL;
10480 fallthrough;
10481 case CMD_CREATE_XRI_CR:
10482 case CMD_CLOSE_XRI_CN:
10483 case CMD_CLOSE_XRI_CX:
10484 break;
10485 default:
10486 goto iocb_busy;
10487 }
10488
10489 /*
10490 * For FCP commands, we must be in a state where we can process link
10491 * attention events.
10492 */
10493 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10494 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10495 goto iocb_busy;
10496 }
10497
10498 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10499 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10500 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10501
10502 if (iocb)
10503 lpfc_sli_update_ring(phba, pring);
10504 else
10505 lpfc_sli_update_full_ring(phba, pring);
10506
10507 if (!piocb)
10508 return IOCB_SUCCESS;
10509
10510 goto out_busy;
10511
10512 iocb_busy:
10513 pring->stats.iocb_cmd_delay++;
10514
10515 out_busy:
10516
10517 if (!(flag & SLI_IOCB_RET_IOCB)) {
10518 __lpfc_sli_ringtx_put(phba, pring, piocb);
10519 return IOCB_SUCCESS;
10520 }
10521
10522 return IOCB_BUSY;
10523}
10524
10525/**
10526 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10527 * @phba: Pointer to HBA context object.
10528 * @ring_number: SLI ring number to issue the iocb on.
10529 * @piocb: Pointer to command iocb.
10530 * @flag: Flag indicating if this command can be put into txq.
10531 *
10532 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10533 * routine to send an iocb command to an HBA with SLI-3 interface spec.
10534 *
10535 * This function takes the hbalock before invoking the lockless version.
10536 * The function will return success after it successfully submits the iocb to
10537 * the firmware or after adding it to the txq.
10538 **/
10539static int
10540__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10541 struct lpfc_iocbq *piocb, uint32_t flag)
10542{
10543 unsigned long iflags;
10544 int rc;
10545
10546 spin_lock_irqsave(&phba->hbalock, iflags);
10547 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10548 spin_unlock_irqrestore(&phba->hbalock, iflags);
10549
10550 return rc;
10551}
10552
10553/**
10554 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10555 * @phba: Pointer to HBA context object.
10556 * @ring_number: SLI ring number to issue wqe on.
10557 * @piocb: Pointer to command iocb.
10558 * @flag: Flag indicating if this command can be put into txq.
10559 *
10560 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10561 * a wqe command to an HBA with SLI-4 interface spec.
10562 *
10563 * This function is a lockless version. The function will return success
10564 * after it successfully submits the wqe to the firmware or after adding it to
10565 * the txq.
10566 **/
10567static int
10568__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10569 struct lpfc_iocbq *piocb, uint32_t flag)
10570{
10571 struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10572
10573 lpfc_prep_embed_io(phba, lpfc_cmd);
10574 return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10575}
10576
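/**
 * lpfc_prep_embed_io - Build the BDE words of an FCP WQE
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the FCP io buffer whose WQE is being prepared.
 *
 * When the port supports embedded payloads (fcp_embed_io), the FCP_CMND
 * payload is copied inline into Words 18-29 of the WQE and the BDE is
 * marked as immediate data. Otherwise, a 64-bit inline BDE referencing
 * the first SGE is built. If the command carries VMID information, the
 * appropriate CS_CTL or application-header tags are also set in the WQE.
 **/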
10577void
10578lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10579{
10580 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10581 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10582 struct sli4_sge_le *sgl;
10583 u32 type_size;
10584
10585 /* 128 byte wqe support here */
10586 sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
10587
10588 if (phba->fcp_embed_io) {
10589 struct fcp_cmnd *fcp_cmnd;
10590 u32 *ptr;
10591
10592 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10593
10594 /* Word 0-2 - FCP_CMND */
10595 type_size = le32_to_cpu(sgl->sge_len);
10596 type_size |= ULP_BDE64_TYPE_BDE_IMMED;
10597 wqe->generic.bde.tus.w = type_size;
10598 wqe->generic.bde.addrHigh = 0;
10599 wqe->generic.bde.addrLow = 72; /* Word 18 */
10600
10601 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10602 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10603
10604 /* Word 18-29 FCP CMND Payload */
10605 ptr = &wqe->words[18];
10606 lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
10607 } else {
10608 /* Word 0-2 - Inline BDE */
10609 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10610 wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
10611 wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
10612 wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
10613
10614 /* Word 10 */
10615 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10616 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10617 }
10618
10619 /* add the VMID tags as per switch response */
10620 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10621 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10622 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10623 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10624 (piocb->vmid_tag.cs_ctl_vmid));
10625 } else if (phba->cfg_vmid_app_header) {
10626 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10627 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10628 wqe->words[31] = piocb->vmid_tag.app_id;
10629 }
10630 }
10631}
10632
10633/**
10634 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10635 * @phba: Pointer to HBA context object.
10636 * @ring_number: SLI ring number to issue iocb on.
10637 * @piocb: Pointer to command iocb.
10638 * @flag: Flag indicating if this command can be put into txq.
10639 *
10640 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10641 * an iocb command to an HBA with SLI-4 interface spec.
10642 *
10643 * This function is called with ringlock held. The function will return success
10644 * after it successfully submits the iocb to the firmware or after adding it to
10645 * the txq.
10646 **/
10647static int
10648__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10649 struct lpfc_iocbq *piocb, uint32_t flag)
10650{
10651 struct lpfc_sglq *sglq;
10652 union lpfc_wqe128 *wqe;
10653 struct lpfc_queue *wq;
10654 struct lpfc_sli_ring *pring;
10655 u32 ulp_command = get_job_cmnd(phba, piocb);
10656
10657 /* Get the WQ */
10658 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10659 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10660 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10661 } else {
10662 wq = phba->sli4_hba.els_wq;
10663 }
10664
10665 /* Get corresponding ring */
10666 pring = wq->pring;
10667
10668 /*
10669	 * The WQE can be either 64 or 128 bytes.
10670 */
10671
10672 lockdep_assert_held(&pring->ring_lock);
10673 wqe = &piocb->wqe;
10674 if (piocb->sli4_xritag == NO_XRI) {
10675 if (ulp_command == CMD_ABORT_XRI_CX)
10676 sglq = NULL;
10677 else {
10678 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10679 if (!sglq) {
10680 if (!(flag & SLI_IOCB_RET_IOCB)) {
10681 __lpfc_sli_ringtx_put(phba,
10682 pring,
10683 piocb);
10684 return IOCB_SUCCESS;
10685 } else {
10686 return IOCB_BUSY;
10687 }
10688 }
10689 }
10690 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10691 /* These IO's already have an XRI and a mapped sgl. */
10692 sglq = NULL;
10693	} else {
10694		/*
10695		 * This is a continuation of a command (CX), so this
10696		 * sglq is on the active list.
10698 */
10699 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10700 if (!sglq)
10701 return IOCB_ERROR;
10702 }
10703
10704 if (sglq) {
10705 piocb->sli4_lxritag = sglq->sli4_lxritag;
10706 piocb->sli4_xritag = sglq->sli4_xritag;
10707
10708 /* ABTS sent by initiator to CT exchange, the
10709 * RX_ID field will be filled with the newly
10710 * allocated responder XRI.
10711 */
10712 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10713 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10714 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10715 piocb->sli4_xritag);
10716
10717 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10718 piocb->sli4_xritag);
10719
10720 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10721 return IOCB_ERROR;
10722 }
10723
10724 if (lpfc_sli4_wq_put(wq, wqe))
10725 return IOCB_ERROR;
10726
10727 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10728
10729 return 0;
10730}
10731
10732/*
10733 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10734 *
10735 * This routine wraps the actual fcp i/o routine, which issues a WQE on an
10736 * SLI-4 port or an IOCB on an SLI-3 port, through the API jump table function
10737 * pointer in the lpfc_hba struct.
10738 *
10739 * Return codes:
10740 * IOCB_ERROR - Error
10741 * IOCB_SUCCESS - Success
10742 * IOCB_BUSY - Busy
10743 **/
10744int
10745lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10746 struct lpfc_iocbq *piocb, uint32_t flag)
10747{
10748 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10749}
10750
10751/*
10752 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10753 *
10754 * This routine wraps the actual lockless IOCB issuing routine through the
10755 * API jump table function pointer in the lpfc_hba struct.
10756 *
10757 * Return codes:
10758 * IOCB_ERROR - Error
10759 * IOCB_SUCCESS - Success
10760 * IOCB_BUSY - Busy
10761 **/
10762int
10763__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10764 struct lpfc_iocbq *piocb, uint32_t flag)
10765{
10766 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10767}
10768
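/**
 * __lpfc_sli_prep_els_req_rsp_s3 - SLI3 prep of an ELS request or response
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @vport: Pointer to virtual port object.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @cmd_size: Size of the ELS command payload, in bytes.
 * @did: Destination ID for the exchange.
 * @elscmd: ELS command code.
 * @tmo: Command timeout value.
 * @expect_rsp: Non-zero to build an ELS_REQUEST64 request, zero to build
 *              an XMIT_ELS_RSP64 response.
 *
 * Fills in the IOCB fields, including the NPIV VPI context when NPIV is
 * enabled, for an HBA with SLI-3 interface spec.
 **/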
10769static void
10770__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10771 struct lpfc_vport *vport,
10772 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10773 u32 elscmd, u8 tmo, u8 expect_rsp)
10774{
10775 struct lpfc_hba *phba = vport->phba;
10776 IOCB_t *cmd;
10777
10778 cmd = &cmdiocbq->iocb;
10779 memset(cmd, 0, sizeof(*cmd));
10780
10781 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10782 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10783 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10784
10785 if (expect_rsp) {
10786 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10787 cmd->un.elsreq64.remoteID = did; /* DID */
10788 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10789 cmd->ulpTimeout = tmo;
10790 } else {
10791 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10792 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10793 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10794 cmd->ulpPU = PARM_NPIV_DID;
10795 }
10796 cmd->ulpBdeCount = 1;
10797 cmd->ulpLe = 1;
10798 cmd->ulpClass = CLASS3;
10799
10800 /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10801 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10802 if (expect_rsp) {
10803 cmd->un.elsreq64.myID = vport->fc_myDID;
10804
10805 /* For ELS_REQUEST64_CR, use the VPI by default */
10806 cmd->ulpContext = phba->vpi_ids[vport->vpi];
10807 }
10808
10809 cmd->ulpCt_h = 0;
10810 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10811 if (elscmd == ELS_CMD_ECHO)
10812 cmd->ulpCt_l = 0; /* context = invalid RPI */
10813 else
10814 cmd->ulpCt_l = 1; /* context = VPI */
10815 }
10816}
10817
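/**
 * __lpfc_sli_prep_els_req_rsp_s4 - SLI4 prep of an ELS request or response
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @vport: Pointer to virtual port object.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @cmd_size: Size of the ELS command payload, in bytes.
 * @did: Destination ID for the exchange.
 * @elscmd: ELS command code.
 * @tmo: Command timeout value.
 * @expect_rsp: Non-zero to build an ELS_REQUEST64_WQE request, zero to
 *              build an XMIT_ELS_RSP64_WQE response.
 *
 * Builds the 128-byte WQE, selecting the ELS_ID for PLOGI, FLOGI, LOGO
 * and FDISC requests and setting the VPI context for NPIV or pt2pt
 * traffic, for an HBA with SLI-4 interface spec.
 **/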
10818static void
10819__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10820 struct lpfc_vport *vport,
10821 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10822 u32 elscmd, u8 tmo, u8 expect_rsp)
10823{
10824 struct lpfc_hba *phba = vport->phba;
10825 union lpfc_wqe128 *wqe;
10826 struct ulp_bde64_le *bde;
10827 u8 els_id;
10828
10829 wqe = &cmdiocbq->wqe;
10830 memset(wqe, 0, sizeof(*wqe));
10831
10832 /* Word 0 - 2 BDE */
10833 bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10834 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10835 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10836 bde->type_size = cpu_to_le32(cmd_size);
10837 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10838
10839 if (expect_rsp) {
10840 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10841
10842 /* Transfer length */
10843 wqe->els_req.payload_len = cmd_size;
10844 wqe->els_req.max_response_payload_len = FCELSSIZE;
10845
10846 /* DID */
10847 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10848
10849 /* Word 11 - ELS_ID */
10850 switch (elscmd) {
10851 case ELS_CMD_PLOGI:
10852 els_id = LPFC_ELS_ID_PLOGI;
10853 break;
10854 case ELS_CMD_FLOGI:
10855 els_id = LPFC_ELS_ID_FLOGI;
10856 break;
10857 case ELS_CMD_LOGO:
10858 els_id = LPFC_ELS_ID_LOGO;
10859 break;
10860 case ELS_CMD_FDISC:
10861 if (!vport->fc_myDID) {
10862 els_id = LPFC_ELS_ID_FDISC;
10863 break;
10864 }
10865 fallthrough;
10866 default:
10867 els_id = LPFC_ELS_ID_DEFAULT;
10868 break;
10869 }
10870
10871 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10872 } else {
10873 /* DID */
10874 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10875
10876 /* Transfer length */
10877 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10878
10879 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10880 CMD_XMIT_ELS_RSP64_WQE);
10881 }
10882
10883 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10884 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10885 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10886
10887 /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10888 * For SLI4, since the driver controls VPIs we also want to include
10889 * all ELS pt2pt protocol traffic as well.
10890 */
10891 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10892 test_bit(FC_PT2PT, &vport->fc_flag)) {
10893 if (expect_rsp) {
10894 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10895
10896 /* For ELS_REQUEST64_WQE, use the VPI by default */
10897 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10898 phba->vpi_ids[vport->vpi]);
10899 }
10900
10901 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10902 if (elscmd == ELS_CMD_ECHO)
10903 bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10904 else
10905 bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10906 }
10907}
10908
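/**
 * lpfc_sli_prep_els_req_rsp - Wrapper for preparing an ELS request/response
 *
 * This routine calls the SLI3 or SLI4 prep routine through the API jump
 * table function pointer in the lpfc_hba struct.
 **/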
10909void
10910lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10911 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10912 u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10913 u8 expect_rsp)
10914{
10915 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10916 elscmd, tmo, expect_rsp);
10917}
10918
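/**
 * __lpfc_sli_prep_gen_req_s3 - SLI3 prep of a GEN_REQUEST64 iocb
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @rpi: Remote port indicator, used as the iocb context.
 * @num_entry: Number of BDEs in the buffer pointer list.
 * @tmo: Command timeout value.
 *
 * Builds a CMD_GEN_REQUEST64_CR iocb carrying a CT (common transport)
 * payload for an HBA with SLI-3 interface spec.
 **/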
10919static void
10920__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10921 u16 rpi, u32 num_entry, u8 tmo)
10922{
10923 IOCB_t *cmd;
10924
10925 cmd = &cmdiocbq->iocb;
10926 memset(cmd, 0, sizeof(*cmd));
10927
10928 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10929 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10930 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10931 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10932
10933 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10934 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10935 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10936
10937 cmd->ulpContext = rpi;
10938 cmd->ulpClass = CLASS3;
10939 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10940 cmd->ulpBdeCount = 1;
10941 cmd->ulpLe = 1;
10942 cmd->ulpOwner = OWN_CHIP;
10943 cmd->ulpTimeout = tmo;
10944}
10945
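/**
 * __lpfc_sli_prep_gen_req_s4 - SLI4 prep of a GEN_REQUEST64 wqe
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @rpi: Remote port indicator, used as the WQE context tag.
 * @num_entry: Number of BDEs in the buffer pointer list.
 * @tmo: Command timeout value.
 *
 * Walks the buffer pointer list to compute the transmit length (the run
 * of leading 64-bit BDEs) and the total length of all entries; the
 * difference is programmed as the maximum response payload length.
 **/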
10946static void
10947__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10948 u16 rpi, u32 num_entry, u8 tmo)
10949{
10950 union lpfc_wqe128 *cmdwqe;
10951 struct ulp_bde64_le *bde, *bpl;
10952 u32 xmit_len = 0, total_len = 0, size, type, i;
10953
10954 cmdwqe = &cmdiocbq->wqe;
10955 memset(cmdwqe, 0, sizeof(*cmdwqe));
10956
10957 /* Calculate total_len and xmit_len */
10958 bpl = (struct ulp_bde64_le *)bmp->virt;
10959 for (i = 0; i < num_entry; i++) {
10960 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10961 total_len += size;
10962 }
10963 for (i = 0; i < num_entry; i++) {
10964 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10965 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10966 if (type != ULP_BDE64_TYPE_BDE_64)
10967 break;
10968 xmit_len += size;
10969 }
10970
10971 /* Words 0 - 2 */
10972 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10973 bde->addr_low = bpl->addr_low;
10974 bde->addr_high = bpl->addr_high;
10975 bde->type_size = cpu_to_le32(xmit_len);
10976 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10977
10978 /* Word 3 */
10979 cmdwqe->gen_req.request_payload_len = xmit_len;
10980
10981 /* Word 5 */
10982 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10983 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10984 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10985 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10986
10987 /* Word 6 */
10988 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10989
10990 /* Word 7 */
10991 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10992 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10993 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10994 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10995
10996 /* Word 12 */
10997 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10998}
10999
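/**
 * lpfc_sli_prep_gen_req - Wrapper for preparing a GEN_REQUEST64
 *
 * This routine calls the SLI3 or SLI4 prep routine through the API jump
 * table function pointer in the lpfc_hba struct.
 **/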
11000void
11001lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11002 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
11003{
11004 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
11005}
11006
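/**
 * __lpfc_sli_prep_xmit_seq64_s3 - SLI3 prep of an XMIT_SEQUENCE64 iocb
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @rpi: Remote port indicator, used as the context for the CR variant.
 * @ox_id: Originator exchange ID, used as the context for the CX variant.
 * @num_entry: Number of BDEs in the buffer pointer list.
 * @rctl: R_CTL value for the frame header.
 * @last_seq: Non-zero if this is the last sequence of the exchange.
 * @cr_cx_cmd: CMD_XMIT_SEQUENCE64_CR or CMD_XMIT_SEQUENCE64_CX.
 **/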
11007static void
11008__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
11009 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11010 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11011{
11012 IOCB_t *icmd;
11013
11014 icmd = &cmdiocbq->iocb;
11015 memset(icmd, 0, sizeof(*icmd));
11016
11017 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11018 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11019 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11020 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11021 icmd->un.xseq64.w5.hcsw.Fctl = LA;
11022 if (last_seq)
11023 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11024 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11025 icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11026 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11027
11028 icmd->ulpBdeCount = 1;
11029 icmd->ulpLe = 1;
11030 icmd->ulpClass = CLASS3;
11031
11032 switch (cr_cx_cmd) {
11033 case CMD_XMIT_SEQUENCE64_CR:
11034 icmd->ulpContext = rpi;
11035 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11036 break;
11037 case CMD_XMIT_SEQUENCE64_CX:
11038 icmd->ulpContext = ox_id;
11039 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11040 break;
11041 default:
11042 break;
11043 }
11044}
11045
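/**
 * __lpfc_sli_prep_xmit_seq64_s4 - SLI4 prep of an XMIT_SEQUENCE64 wqe
 * @cmdiocbq: Pointer to the command iocb being prepared.
 * @bmp: Pointer to the DMA buffer holding the buffer pointer list.
 * @rpi: Remote port indicator, used as the WQE context tag.
 * @ox_id: Originator exchange ID placed in the received OX_ID field.
 * @full_size: Full transfer size, used for LIBDFC and loopback requests.
 * @rctl: R_CTL value for the frame header.
 * @last_seq: Non-zero if this is the last sequence of the exchange.
 * @cr_cx_cmd: XMIT_SEQUENCE command variant requested by the caller.
 **/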
11046static void
11047__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11048 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11049 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11050{
11051 union lpfc_wqe128 *wqe;
11052 struct ulp_bde64 *bpl;
11053
11054 wqe = &cmdiocbq->wqe;
11055 memset(wqe, 0, sizeof(*wqe));
11056
11057 /* Words 0 - 2 */
11058 bpl = (struct ulp_bde64 *)bmp->virt;
11059 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11060 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11061 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11062
11063 /* Word 5 */
11064 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11065 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11066 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11067 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11068 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11069
11070 /* Word 6 */
11071 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11072
11073 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11074 CMD_XMIT_SEQUENCE64_WQE);
11075
11076 /* Word 7 */
11077 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11078
11079 /* Word 9 */
11080 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11081
11082 /* Word 12 */
11083 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
11084 wqe->xmit_sequence.xmit_len = full_size;
11085 else
11086 wqe->xmit_sequence.xmit_len =
11087 wqe->xmit_sequence.bde.tus.f.bdeSize;
11088}
11089
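/**
 * lpfc_sli_prep_xmit_seq64 - Wrapper for preparing an XMIT_SEQUENCE64
 *
 * This routine calls the SLI3 or SLI4 prep routine through the API jump
 * table function pointer in the lpfc_hba struct.
 **/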
11090void
11091lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11092 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11093 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11094{
11095 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11096 rctl, last_seq, cr_cx_cmd);
11097}
11098
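/**
 * __lpfc_sli_prep_abort_xri_s3 - SLI3 prep of an abort iocb
 * @cmdiocbq: Pointer to the abort iocb being prepared.
 * @ulp_context: Context tag of the exchange to abort.
 * @iotag: IO tag of the command being aborted.
 * @ulp_class: Class of service of the command being aborted.
 * @cqid: Completion queue ID (unused on SLI3 ports).
 * @ia: True to close the exchange (CMD_CLOSE_XRI_CN) without sending an
 *      ABTS; false to drive an ABTS via CMD_ABORT_XRI_CN.
 * @wqec: WQE completion request flag (unused on SLI3 ports).
 **/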
11099static void
11100__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11101 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11102 bool wqec)
11103{
11104 IOCB_t *icmd = NULL;
11105
11106 icmd = &cmdiocbq->iocb;
11107 memset(icmd, 0, sizeof(*icmd));
11108
11109 /* Word 5 */
11110 icmd->un.acxri.abortContextTag = ulp_context;
11111 icmd->un.acxri.abortIoTag = iotag;
11112
11113 if (ia) {
11114 /* Word 7 */
11115 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11116 } else {
11117 /* Word 3 */
11118 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11119
11120 /* Word 7 */
11121 icmd->ulpClass = ulp_class;
11122 icmd->ulpCommand = CMD_ABORT_XRI_CN;
11123 }
11124
11125 /* Word 7 */
11126 icmd->ulpLe = 1;
11127}
11128
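/**
 * __lpfc_sli_prep_abort_xri_s4 - SLI4 prep of an ABORT_XRI_WQE
 * @cmdiocbq: Pointer to the abort iocb being prepared.
 * @ulp_context: Abort tag identifying the exchange to abort.
 * @iotag: Request tag for the abort WQE.
 * @ulp_class: Class of service of the command being aborted (unused on
 *             SLI4 ports).
 * @cqid: Completion queue ID to post the abort completion to.
 * @ia: True to abort the exchange without sending an ABTS on the wire.
 * @wqec: True to request a WQE completion for this abort.
 **/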
11129static void
11130__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11131 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11132 bool wqec)
11133{
11134 union lpfc_wqe128 *wqe;
11135
11136 wqe = &cmdiocbq->wqe;
11137 memset(wqe, 0, sizeof(*wqe));
11138
11139 /* Word 3 */
11140 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11141 if (ia)
11142 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11143 else
11144 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11145
11146 /* Word 7 */
11147 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11148
11149 /* Word 8 */
11150 wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11151
11152 /* Word 9 */
11153 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11154
11155 /* Word 10 */
11156 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11157
11158 /* Word 11 */
11159 if (wqec)
11160 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11161 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11162 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11163}
11164
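/**
 * lpfc_sli_prep_abort_xri - Wrapper for preparing an abort request
 *
 * This routine calls the SLI3 or SLI4 prep routine through the API jump
 * table function pointer in the lpfc_hba struct.
 **/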
11165void
11166lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11167 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11168 bool ia, bool wqec)
11169{
11170 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11171 cqid, ia, wqec);
11172}
11173
11174/**
11175 * lpfc_sli_api_table_setup - Set up sli api function jump table
11176 * @phba: The hba struct for which this call is being executed.
11177 * @dev_grp: The HBA PCI-Device group number.
11178 *
11179 * This routine sets up the SLI interface API function jump table in @phba
11180 * struct.
11181 * Returns: 0 - success, -ENODEV - failure.
11182 **/
11183int
11184lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11185{
11187 switch (dev_grp) {
11188 case LPFC_PCI_DEV_LP:
11189 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11190 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11191 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11192 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11193 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11194 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11195 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11196 break;
11197 case LPFC_PCI_DEV_OC:
11198 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11199 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11200 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11201 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11202 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11203 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11204 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11205 break;
11206 default:
11207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11208 "1419 Invalid HBA PCI-device group: 0x%x\n",
11209 dev_grp);
11210 return -ENODEV;
11211 }
11212 return 0;
11213}
11214
11215/**
11216 * lpfc_sli4_calc_ring - Calculates which ring to use
11217 * @phba: Pointer to HBA context object.
11218 * @piocb: Pointer to command iocb.
11219 *
11220 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11221 * hba_wqidx, thus we need to calculate the corresponding ring.
11222 * Since ABORTS must go on the same WQ as the command they are
11223 * aborting, we use the command's hba_wqidx.
11224 */
11225struct lpfc_sli_ring *
11226lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11227{
11228 struct lpfc_io_buf *lpfc_cmd;
11229
11230 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11231 if (unlikely(!phba->sli4_hba.hdwq))
11232 return NULL;
11233 /*
11234 * for abort iocb hba_wqidx should already
11235 * be setup based on what work queue we used.
11236 */
11237 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11238 lpfc_cmd = piocb->io_buf;
11239 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11240 }
11241 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11242 } else {
11243 if (unlikely(!phba->sli4_hba.els_wq))
11244 return NULL;
11245 piocb->hba_wqidx = 0;
11246 return phba->sli4_hba.els_wq->pring;
11247 }
11248}
11249
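/**
 * lpfc_sli4_poll_eq - Process an event queue that is in poll mode
 * @eq: Pointer to the event queue to service.
 *
 * If the EQ is in LPFC_EQ_POLL mode, process any pending EQEs without
 * rearming the queue, so completions are picked up from the submission
 * path rather than from an interrupt.
 **/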
11250inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11251{
11252 struct lpfc_hba *phba = eq->phba;
11253
11254 /*
11255	 * Unlocking an irq is one of the entry points to check
11256	 * for re-schedule, but we are fine in the io submission
11257	 * path as the midlayer does a get_cpu to glue us in. Flush
11258	 * out the invalidation queue so we can see the updated
11259	 * value for the flag.
11260 */
11261 smp_rmb();
11262
11263 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11264 /* We will not likely get the completion for the caller
11265		 * during this iteration, but that is fine.
11266		 * Future IOs coming on this eq should be able to
11267		 * pick it up. Single IOs will be handled through a
11268		 * schedule from the polling timer function, which is
11269		 * currently triggered every 1 msec.
11270 */
11271 lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11272 LPFC_QUEUE_WORK);
11273}
11274
11275/**
11276 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11277 * @phba: Pointer to HBA context object.
11278 * @ring_number: Ring number
11279 * @piocb: Pointer to command iocb.
11280 * @flag: Flag indicating if this command can be put into txq.
11281 *
11282 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It
11283 * takes the appropriate ring lock (SLI4) or hbalock (SLI3), calls
11284 * __lpfc_sli_issue_iocb, and returns whatever error that function
11285 * returns. This wrapper is used by functions which do not already
11286 * hold the lock.
11287 **/
11288int
11289lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11290 struct lpfc_iocbq *piocb, uint32_t flag)
11291{
11292 struct lpfc_sli_ring *pring;
11293 struct lpfc_queue *eq;
11294 unsigned long iflags;
11295 int rc;
11296
11297 /* If the PCI channel is in offline state, do not post iocbs. */
11298 if (unlikely(pci_channel_offline(phba->pcidev)))
11299 return IOCB_ERROR;
11300
11301 if (phba->sli_rev == LPFC_SLI_REV4) {
11302 lpfc_sli_prep_wqe(phba, piocb);
11303
11304 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11305
11306 pring = lpfc_sli4_calc_ring(phba, piocb);
11307 if (unlikely(pring == NULL))
11308 return IOCB_ERROR;
11309
11310 spin_lock_irqsave(&pring->ring_lock, iflags);
11311 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11312 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11313
11314 lpfc_sli4_poll_eq(eq);
11315 } else {
11316 /* For now, SLI2/3 will still use hbalock */
11317 spin_lock_irqsave(&phba->hbalock, iflags);
11318 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11319 spin_unlock_irqrestore(&phba->hbalock, iflags);
11320 }
11321 return rc;
11322}
11323
11324/**
11325 * lpfc_extra_ring_setup - Extra ring setup function
11326 * @phba: Pointer to HBA context object.
11327 *
11328 * This function is called while the driver attaches to the
11329 * HBA to set up the extra ring. The extra ring is used
11330 * only when driver needs to support target mode functionality
11331 * or IP over FC functionalities.
11332 *
11333 * This function is called with no lock held. SLI3 only.
11334 **/
11335static int
11336lpfc_extra_ring_setup(struct lpfc_hba *phba)
11337{
11338 struct lpfc_sli *psli;
11339 struct lpfc_sli_ring *pring;
11340
11341 psli = &phba->sli;
11342
11343 /* Adjust cmd/rsp ring iocb entries more evenly */
11344
11345 /* Take some away from the FCP ring */
11346 pring = &psli->sli3_ring[LPFC_FCP_RING];
11347 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11348 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11349 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11350 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11351
11352 /* and give them to the extra ring */
11353 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11354
11355 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11356 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11357 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11358 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11359
11360 /* Setup default profile for this ring */
11361 pring->iotag_max = 4096;
11362 pring->num_mask = 1;
11363 pring->prt[0].profile = 0; /* Mask 0 */
11364 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11365 pring->prt[0].type = phba->cfg_multi_ring_type;
11366 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11367 return 0;
11368}
11369
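/**
 * lpfc_sli_post_recovery_event - Queue a port recovery event for a node
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to the node needing recovery.
 *
 * Takes a reference on @ndlp, queues an LPFC_EVT_RECOVER_PORT event on
 * the driver work list (unless one is already queued for this node), and
 * wakes the worker thread.
 **/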
11370static void
11371lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11372 struct lpfc_nodelist *ndlp)
11373{
11374 unsigned long iflags;
11375 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11376
11377 /* Hold a node reference for outstanding queued work */
11378 if (!lpfc_nlp_get(ndlp))
11379 return;
11380
11381 spin_lock_irqsave(&phba->hbalock, iflags);
11382 if (!list_empty(&evtp->evt_listp)) {
11383 spin_unlock_irqrestore(&phba->hbalock, iflags);
11384 lpfc_nlp_put(ndlp);
11385 return;
11386 }
11387
11388 evtp->evt_arg1 = ndlp;
11389 evtp->evt = LPFC_EVT_RECOVER_PORT;
11390 list_add_tail(&evtp->evt_listp, &phba->work_list);
11391 spin_unlock_irqrestore(&phba->hbalock, iflags);
11392
11393 lpfc_worker_wake_up(phba);
11394}
11395
11396/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11397 * @phba: Pointer to HBA context object.
11398 * @iocbq: Pointer to iocb object.
11399 *
11400 * The async_event handler calls this routine when it receives
11401 * an ASYNC_STATUS_CN event from the port. The port generates
11402 * this event when an Abort Sequence request to an rport fails
11403 * twice in succession. The abort could be originated by the
11404 * driver or by the port. The ABTS could have been for an ELS
11405 * or FCP IO. The port only generates this event when an ABTS
11406 * fails to complete after one retry.
11407 */
11408static void
11409lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11410 struct lpfc_iocbq *iocbq)
11411{
11412 struct lpfc_nodelist *ndlp = NULL;
11413 uint16_t rpi = 0, vpi = 0;
11414 struct lpfc_vport *vport = NULL;
11415
11416 /* The rpi in the ulpContext is vport-sensitive. */
11417 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11418 rpi = iocbq->iocb.ulpContext;
11419
11420 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11421 "3092 Port generated ABTS async event "
11422 "on vpi %d rpi %d status 0x%x\n",
11423 vpi, rpi, iocbq->iocb.ulpStatus);
11424
11425 vport = lpfc_find_vport_by_vpid(phba, vpi);
11426 if (!vport)
11427 goto err_exit;
11428 ndlp = lpfc_findnode_rpi(vport, rpi);
11429 if (!ndlp)
11430 goto err_exit;
11431
11432 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11433 lpfc_sli_abts_recover_port(vport, ndlp);
11434 return;
11435
11436 err_exit:
11437 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11438 "3095 Event Context not found, no "
11439 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11440 vpi, rpi, iocbq->iocb.ulpStatus,
11441 iocbq->iocb.ulpContext);
11442}
11443
11444/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11445 * @phba: pointer to HBA context object.
11446 * @ndlp: nodelist pointer for the impacted rport.
11447 * @axri: pointer to the wcqe containing the failed exchange.
11448 *
11449 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11450 * port. The port generates this event when an abort exchange request to an
11451 * rport fails twice in succession with no reply. The abort could be originated
11452 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
11453 */
11454void
11455lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11456 struct lpfc_nodelist *ndlp,
11457 struct sli4_wcqe_xri_aborted *axri)
11458{
11459 uint32_t ext_status = 0;
11460
11461 if (!ndlp) {
11462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11463 "3115 Node Context not found, driver "
11464 "ignoring abts err event\n");
11465 return;
11466 }
11467
11468 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11469 "3116 Port generated FCP XRI ABORT event on "
11470 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11471 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11472 bf_get(lpfc_wcqe_xa_xri, axri),
11473 bf_get(lpfc_wcqe_xa_status, axri),
11474 axri->parameter);
11475
11476 /*
11477 * Catch the ABTS protocol failure case. Older OCe FW releases returned
11478 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11479 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11480 */
11481 ext_status = axri->parameter & IOERR_PARAM_MASK;
11482 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11483 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11484 lpfc_sli_post_recovery_event(phba, ndlp);
11485}
11486
11487/**
11488 * lpfc_sli_async_event_handler - ASYNC iocb handler function
11489 * @phba: Pointer to HBA context object.
11490 * @pring: Pointer to driver SLI ring object.
11491 * @iocbq: Pointer to iocb object.
11492 *
11493 * This function is called by the slow ring event handler
11494 * function when there is an ASYNC event iocb in the ring.
11495 * This function is called with no lock held.
11496 * Currently this function handles only temperature related
11497 * ASYNC events. The function decodes the temperature sensor
11498 * event message and posts events for the management applications.
11499 **/
11500static void
11501lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11502	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11503{
11504 IOCB_t *icmd;
11505 uint16_t evt_code;
11506 struct temp_event temp_event_data;
11507 struct Scsi_Host *shost;
11508 uint32_t *iocb_w;
11509
11510 icmd = &iocbq->iocb;
11511 evt_code = icmd->un.asyncstat.evt_code;
11512
11513 switch (evt_code) {
11514 case ASYNC_TEMP_WARN:
11515 case ASYNC_TEMP_SAFE:
11516 temp_event_data.data = (uint32_t) icmd->ulpContext;
11517 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11518 if (evt_code == ASYNC_TEMP_WARN) {
11519 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11521 "0347 Adapter is very hot, please take "
11522 "corrective action. temperature : %d Celsius\n",
11523 (uint32_t) icmd->ulpContext);
11524 } else {
11525 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11527 "0340 Adapter temperature is OK now. "
11528 "temperature : %d Celsius\n",
11529 (uint32_t) icmd->ulpContext);
11530 }
11531
11532 /* Send temperature change event to applications */
11533 shost = lpfc_shost_from_vport(phba->pport);
11534 fc_host_post_vendor_event(shost, fc_get_event_number(),
11535 sizeof(temp_event_data), (char *) &temp_event_data,
11536 LPFC_NL_VENDOR_ID);
11537 break;
11538 case ASYNC_STATUS_CN:
11539 lpfc_sli_abts_err_handler(phba, iocbq);
11540 break;
11541 default:
11542 iocb_w = (uint32_t *) icmd;
11543 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11544 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11545 " evt_code 0x%x\n"
11546 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11547 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11548 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11549 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11550 pring->ringno, icmd->un.asyncstat.evt_code,
11551 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11552 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11553 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11554 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11555
11556 break;
11557 }
11558}
11559
11560
11561/**
11562 * lpfc_sli4_setup - SLI ring setup function
11563 * @phba: Pointer to HBA context object.
11564 *
11565 * lpfc_sli4_setup sets up the unsolicited receive masks on the
11566 * ELS ring of the SLI4 interface. This function is
11567 * called while the driver attaches to the HBA and before the
11568 * interrupts are enabled. So there is no need for locking.
11569 *
11570 * This function always returns 0.
11571 **/
11572int
11573lpfc_sli4_setup(struct lpfc_hba *phba)
11574{
11575 struct lpfc_sli_ring *pring;
11576
11577 pring = phba->sli4_hba.els_wq->pring;
11578 pring->num_mask = LPFC_MAX_RING_MASK;
11579 pring->prt[0].profile = 0; /* Mask 0 */
11580 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11581 pring->prt[0].type = FC_TYPE_ELS;
11582 pring->prt[0].lpfc_sli_rcv_unsol_event =
11583 lpfc_els_unsol_event;
11584 pring->prt[1].profile = 0; /* Mask 1 */
11585 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11586 pring->prt[1].type = FC_TYPE_ELS;
11587 pring->prt[1].lpfc_sli_rcv_unsol_event =
11588 lpfc_els_unsol_event;
11589 pring->prt[2].profile = 0; /* Mask 2 */
11590 /* NameServer Inquiry */
11591 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11592 /* NameServer */
11593 pring->prt[2].type = FC_TYPE_CT;
11594 pring->prt[2].lpfc_sli_rcv_unsol_event =
11595 lpfc_ct_unsol_event;
11596 pring->prt[3].profile = 0; /* Mask 3 */
11597 /* NameServer response */
11598 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11599 /* NameServer */
11600 pring->prt[3].type = FC_TYPE_CT;
11601 pring->prt[3].lpfc_sli_rcv_unsol_event =
11602 lpfc_ct_unsol_event;
11603 return 0;
11604}
11605
11606/**
11607 * lpfc_sli_setup - SLI ring setup function
11608 * @phba: Pointer to HBA context object.
11609 *
11610 * lpfc_sli_setup sets up rings of the SLI interface with
11611 * number of iocbs per ring and iotags. This function is
11612 * called while the driver attaches to the HBA and before the
11613 * interrupts are enabled. So there is no need for locking.
11614 *
11615 * This function always returns 0. SLI3 only.
11616 **/
11617int
11618lpfc_sli_setup(struct lpfc_hba *phba)
11619{
11620 int i, totiocbsize = 0;
11621 struct lpfc_sli *psli = &phba->sli;
11622 struct lpfc_sli_ring *pring;
11623
11624 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11625 psli->sli_flag = 0;
11626
11627 psli->iocbq_lookup = NULL;
11628 psli->iocbq_lookup_len = 0;
11629 psli->last_iotag = 0;
11630
11631 for (i = 0; i < psli->num_rings; i++) {
11632 pring = &psli->sli3_ring[i];
11633 switch (i) {
11634 case LPFC_FCP_RING: /* ring 0 - FCP */
11635 /* numCiocb and numRiocb are used in config_port */
11636 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11637 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11638 pring->sli.sli3.numCiocb +=
11639 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11640 pring->sli.sli3.numRiocb +=
11641 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11642 pring->sli.sli3.numCiocb +=
11643 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11644 pring->sli.sli3.numRiocb +=
11645 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11646 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11647 SLI3_IOCB_CMD_SIZE :
11648 SLI2_IOCB_CMD_SIZE;
11649 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11650 SLI3_IOCB_RSP_SIZE :
11651 SLI2_IOCB_RSP_SIZE;
11652 pring->iotag_ctr = 0;
11653 pring->iotag_max =
11654 (phba->cfg_hba_queue_depth * 2);
11655 pring->fast_iotag = pring->iotag_max;
11656 pring->num_mask = 0;
11657 break;
11658 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
11659 /* numCiocb and numRiocb are used in config_port */
11660 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11661 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11662 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11663 SLI3_IOCB_CMD_SIZE :
11664 SLI2_IOCB_CMD_SIZE;
11665 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11666 SLI3_IOCB_RSP_SIZE :
11667 SLI2_IOCB_RSP_SIZE;
11668 pring->iotag_max = phba->cfg_hba_queue_depth;
11669 pring->num_mask = 0;
11670 break;
11671 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
11672 /* numCiocb and numRiocb are used in config_port */
11673 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11674 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11675 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11676 SLI3_IOCB_CMD_SIZE :
11677 SLI2_IOCB_CMD_SIZE;
11678 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11679 SLI3_IOCB_RSP_SIZE :
11680 SLI2_IOCB_RSP_SIZE;
11681 pring->fast_iotag = 0;
11682 pring->iotag_ctr = 0;
11683 pring->iotag_max = 4096;
11684 pring->lpfc_sli_rcv_async_status =
11685 lpfc_sli_async_event_handler;
11686 pring->num_mask = LPFC_MAX_RING_MASK;
11687 pring->prt[0].profile = 0; /* Mask 0 */
11688 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11689 pring->prt[0].type = FC_TYPE_ELS;
11690 pring->prt[0].lpfc_sli_rcv_unsol_event =
11691 lpfc_els_unsol_event;
11692 pring->prt[1].profile = 0; /* Mask 1 */
11693 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11694 pring->prt[1].type = FC_TYPE_ELS;
11695 pring->prt[1].lpfc_sli_rcv_unsol_event =
11696 lpfc_els_unsol_event;
11697 pring->prt[2].profile = 0; /* Mask 2 */
11698 /* NameServer Inquiry */
11699 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11700 /* NameServer */
11701 pring->prt[2].type = FC_TYPE_CT;
11702 pring->prt[2].lpfc_sli_rcv_unsol_event =
11703 lpfc_ct_unsol_event;
11704 pring->prt[3].profile = 0; /* Mask 3 */
11705 /* NameServer response */
11706 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11707 /* NameServer */
11708 pring->prt[3].type = FC_TYPE_CT;
11709 pring->prt[3].lpfc_sli_rcv_unsol_event =
11710 lpfc_ct_unsol_event;
11711 break;
11712 }
11713 totiocbsize += (pring->sli.sli3.numCiocb *
11714 pring->sli.sli3.sizeCiocb) +
11715 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11716 }
11717 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11718 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11719 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11720 "SLI2 SLIM Data: x%x x%lx\n",
11721 phba->brd_no, totiocbsize,
11722 (unsigned long) MAX_SLIM_IOCB_SIZE);
11723 }
11724 if (phba->cfg_multi_ring_support == 2)
11725 lpfc_extra_ring_setup(phba);
11726
11727 return 0;
11728}
11729
11730/**
11731 * lpfc_sli4_queue_init - Queue initialization function
11732 * @phba: Pointer to HBA context object.
11733 *
11734 * lpfc_sli4_queue_init sets up the mailbox queue lists and the txq and
11735 * txcmplq lists for each work queue ring.
11736 * This function is called during the initialization of the SLI
11737 * interface of an HBA.
11738 * This function is called with no lock held and returns nothing.
11740 **/
11741void
11742lpfc_sli4_queue_init(struct lpfc_hba *phba)
11743{
11744 struct lpfc_sli *psli;
11745 struct lpfc_sli_ring *pring;
11746 int i;
11747
11748 psli = &phba->sli;
11749 spin_lock_irq(&phba->hbalock);
11750 INIT_LIST_HEAD(&psli->mboxq);
11751 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11752 /* Initialize list headers for txq and txcmplq as double linked lists */
11753 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11754 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11755 pring->flag = 0;
11756 pring->ringno = LPFC_FCP_RING;
11757 pring->txcmplq_cnt = 0;
11758 INIT_LIST_HEAD(&pring->txq);
11759 INIT_LIST_HEAD(&pring->txcmplq);
11760 INIT_LIST_HEAD(&pring->iocb_continueq);
11761 spin_lock_init(&pring->ring_lock);
11762 }
11763 pring = phba->sli4_hba.els_wq->pring;
11764 pring->flag = 0;
11765 pring->ringno = LPFC_ELS_RING;
11766 pring->txcmplq_cnt = 0;
11767 INIT_LIST_HEAD(&pring->txq);
11768 INIT_LIST_HEAD(&pring->txcmplq);
11769 INIT_LIST_HEAD(&pring->iocb_continueq);
11770 spin_lock_init(&pring->ring_lock);
11771
11772 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11773 pring = phba->sli4_hba.nvmels_wq->pring;
11774 pring->flag = 0;
11775 pring->ringno = LPFC_ELS_RING;
11776 pring->txcmplq_cnt = 0;
11777 INIT_LIST_HEAD(&pring->txq);
11778 INIT_LIST_HEAD(&pring->txcmplq);
11779 INIT_LIST_HEAD(&pring->iocb_continueq);
11780 spin_lock_init(&pring->ring_lock);
11781 }
11782
11783 spin_unlock_irq(&phba->hbalock);
11784}
11785
11786/**
11787 * lpfc_sli_queue_init - Queue initialization function
11788 * @phba: Pointer to HBA context object.
11789 *
11790 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11791 * ring. This function also initializes ring indices of each ring.
11792 * This function is called during the initialization of the SLI
11793 * interface of an HBA.
11794 * This function is called with no lock held and returns nothing.
11796 **/
11797void
11798lpfc_sli_queue_init(struct lpfc_hba *phba)
11799{
11800 struct lpfc_sli *psli;
11801 struct lpfc_sli_ring *pring;
11802 int i;
11803
11804 psli = &phba->sli;
11805 spin_lock_irq(&phba->hbalock);
11806 INIT_LIST_HEAD(&psli->mboxq);
11807 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11808 /* Initialize list headers for txq and txcmplq as double linked lists */
11809 for (i = 0; i < psli->num_rings; i++) {
11810 pring = &psli->sli3_ring[i];
11811 pring->ringno = i;
11812 pring->sli.sli3.next_cmdidx = 0;
11813 pring->sli.sli3.local_getidx = 0;
11814 pring->sli.sli3.cmdidx = 0;
11815 INIT_LIST_HEAD(&pring->iocb_continueq);
11816 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11817 INIT_LIST_HEAD(&pring->postbufq);
11818 pring->flag = 0;
11819 INIT_LIST_HEAD(&pring->txq);
11820 INIT_LIST_HEAD(&pring->txcmplq);
11821 spin_lock_init(&pring->ring_lock);
11822 }
11823 spin_unlock_irq(&phba->hbalock);
11824}
11825
11826/**
11827 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11828 * @phba: Pointer to HBA context object.
11829 *
11830 * This routine flushes the mailbox command subsystem. It will unconditionally
11831 * flush all the mailbox commands in the three possible stages in the mailbox
11832 * command sub-system: pending mailbox command queue; the outstanding mailbox
11833 * command; and the completed mailbox command queue. It is the caller's
11834 * to make sure that the driver is in the proper state to flush the mailbox
11835 * command sub-system. Namely, the posting of mailbox commands into the
11836 * pending mailbox command queue from the various clients must be stopped;
11837 * either the HBA is in a state in which it will never work on the outstanding
11838 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11839 * mailbox command has been completed.
11840 **/
11841static void
11842lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11843{
11844 LIST_HEAD(completions);
11845 struct lpfc_sli *psli = &phba->sli;
11846 LPFC_MBOXQ_t *pmb;
11847 unsigned long iflag;
11848
11849 /* Disable softirqs, including timers from obtaining phba->hbalock */
11850 local_bh_disable();
11851
11852 /* Flush all the mailbox commands in the mbox system */
11853 spin_lock_irqsave(&phba->hbalock, iflag);
11854
11855 /* The pending mailbox command queue */
11856 list_splice_init(&phba->sli.mboxq, &completions);
11857 /* The outstanding active mailbox command */
11858 if (psli->mbox_active) {
11859 list_add_tail(&psli->mbox_active->list, &completions);
11860 psli->mbox_active = NULL;
11861 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11862 }
11863 /* The completed mailbox command queue */
11864 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11865 spin_unlock_irqrestore(&phba->hbalock, iflag);
11866
11867 /* Enable softirqs again, done with phba->hbalock */
11868 local_bh_enable();
11869
11870 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11871 while (!list_empty(&completions)) {
11872 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11873 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11874 if (pmb->mbox_cmpl)
11875 pmb->mbox_cmpl(phba, pmb);
11876 }
11877}
11878
11879/**
11880 * lpfc_sli_host_down - Vport cleanup function
11881 * @vport: Pointer to virtual port object.
11882 *
11883 * lpfc_sli_host_down is called to clean up the resources
11884 * associated with a vport before destroying virtual
11885 * port data structures.
11886 * This function does the following operations:
11887 * - Free discovery resources associated with this virtual
11888 * port.
11889 * - Free iocbs associated with this virtual port in
11890 * the txq.
11891 * - Send abort for all iocb commands associated with this
11892 * vport in txcmplq.
11893 *
11894 * This function is called with no lock held and always returns 1.
11895 **/
11896int
11897lpfc_sli_host_down(struct lpfc_vport *vport)
11898{
11899 LIST_HEAD(completions);
11900 struct lpfc_hba *phba = vport->phba;
11901 struct lpfc_sli *psli = &phba->sli;
11902 struct lpfc_queue *qp = NULL;
11903 struct lpfc_sli_ring *pring;
11904 struct lpfc_iocbq *iocb, *next_iocb;
11905 int i;
11906 unsigned long flags = 0;
11907 uint16_t prev_pring_flag;
11908
11909 lpfc_cleanup_discovery_resources(vport);
11910
11911 spin_lock_irqsave(&phba->hbalock, flags);
11912
11913 /*
11914 * Error everything on the txq since these iocbs
11915 * have not been given to the FW yet.
11916 * Also issue ABTS for everything on the txcmplq
11917 */
11918 if (phba->sli_rev != LPFC_SLI_REV4) {
11919 for (i = 0; i < psli->num_rings; i++) {
11920 pring = &psli->sli3_ring[i];
11921 prev_pring_flag = pring->flag;
11922 /* Only slow rings */
11923 if (pring->ringno == LPFC_ELS_RING) {
11924 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11925 /* Set the lpfc data pending flag */
11926 set_bit(LPFC_DATA_READY, &phba->data_flags);
11927 }
11928 list_for_each_entry_safe(iocb, next_iocb,
11929 &pring->txq, list) {
11930 if (iocb->vport != vport)
11931 continue;
11932 list_move_tail(&iocb->list, &completions);
11933 }
11934 list_for_each_entry_safe(iocb, next_iocb,
11935 &pring->txcmplq, list) {
11936 if (iocb->vport != vport)
11937 continue;
11938 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11939 NULL);
11940 }
11941 pring->flag = prev_pring_flag;
11942 }
11943 } else {
11944 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11945 pring = qp->pring;
11946 if (!pring)
11947 continue;
11948 if (pring == phba->sli4_hba.els_wq->pring) {
11949 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11950 /* Set the lpfc data pending flag */
11951 set_bit(LPFC_DATA_READY, &phba->data_flags);
11952 }
11953 prev_pring_flag = pring->flag;
11954 spin_lock(&pring->ring_lock);
11955 list_for_each_entry_safe(iocb, next_iocb,
11956 &pring->txq, list) {
11957 if (iocb->vport != vport)
11958 continue;
11959 list_move_tail(&iocb->list, &completions);
11960 }
11961 spin_unlock(&pring->ring_lock);
11962 list_for_each_entry_safe(iocb, next_iocb,
11963 &pring->txcmplq, list) {
11964 if (iocb->vport != vport)
11965 continue;
11966 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11967 NULL);
11968 }
11969 pring->flag = prev_pring_flag;
11970 }
11971 }
11972 spin_unlock_irqrestore(&phba->hbalock, flags);
11973
11974 /* Make sure HBA is alive */
11975 lpfc_issue_hb_tmo(phba);
11976
11977 /* Cancel all the IOCBs from the completions list */
11978 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11979 IOERR_SLI_DOWN);
11980 return 1;
11981}
11982
11983/**
11984 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11985 * @phba: Pointer to HBA context object.
11986 *
11987 * This function cleans up all iocb, buffers, mailbox commands
11988 * while shutting down the HBA. This function is called with no
11989 * lock held and always returns 1.
11990 * This function does the following to cleanup driver resources:
11991 * - Free discovery resources for each virtual port
11992 * - Cleanup any pending fabric iocbs
11993 * - Iterate through the iocb txq and free each entry
11994 * in the list.
11995 * - Free up any buffer posted to the HBA
11996 * - Free mailbox commands in the mailbox queue.
11997 **/
11998int
11999lpfc_sli_hba_down(struct lpfc_hba *phba)
12000{
12001 LIST_HEAD(completions);
12002 struct lpfc_sli *psli = &phba->sli;
12003 struct lpfc_queue *qp = NULL;
12004 struct lpfc_sli_ring *pring;
12005 struct lpfc_dmabuf *buf_ptr;
12006 unsigned long flags = 0;
12007 int i;
12008
12009 /* Shutdown the mailbox command sub-system */
12010 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
12011
12012 lpfc_hba_down_prep(phba);
12013
12014 /* Disable softirqs, including timers from obtaining phba->hbalock */
12015 local_bh_disable();
12016
12017 lpfc_fabric_abort_hba(phba);
12018
12019 spin_lock_irqsave(&phba->hbalock, flags);
12020
12021 /*
12022 * Error everything on the txq since these iocbs
12023 * have not been given to the FW yet.
12024 */
12025 if (phba->sli_rev != LPFC_SLI_REV4) {
12026 for (i = 0; i < psli->num_rings; i++) {
12027 pring = &psli->sli3_ring[i];
12028 /* Only slow rings */
12029 if (pring->ringno == LPFC_ELS_RING) {
12030 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12031 /* Set the lpfc data pending flag */
12032 set_bit(LPFC_DATA_READY, &phba->data_flags);
12033 }
12034 list_splice_init(&pring->txq, &completions);
12035 }
12036 } else {
12037 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12038 pring = qp->pring;
12039 if (!pring)
12040 continue;
12041 spin_lock(&pring->ring_lock);
12042 list_splice_init(&pring->txq, &completions);
12043 spin_unlock(&pring->ring_lock);
12044 if (pring == phba->sli4_hba.els_wq->pring) {
12045 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12046 /* Set the lpfc data pending flag */
12047 set_bit(LPFC_DATA_READY, &phba->data_flags);
12048 }
12049 }
12050 }
12051 spin_unlock_irqrestore(&phba->hbalock, flags);
12052
12053 /* Cancel all the IOCBs from the completions list */
12054 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12055 IOERR_SLI_DOWN);
12056
12057 spin_lock_irqsave(&phba->hbalock, flags);
12058 list_splice_init(&phba->elsbuf, &completions);
12059 phba->elsbuf_cnt = 0;
12060 phba->elsbuf_prev_cnt = 0;
12061 spin_unlock_irqrestore(&phba->hbalock, flags);
12062
12063 while (!list_empty(&completions)) {
12064 list_remove_head(&completions, buf_ptr,
12065 struct lpfc_dmabuf, list);
12066 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12067 kfree(buf_ptr);
12068 }
12069
12070 /* Enable softirqs again, done with phba->hbalock */
12071 local_bh_enable();
12072
12073 /* Return any active mbox cmds */
12074 del_timer_sync(&psli->mbox_tmo);
12075
12076 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12077 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12078 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12079
12080 return 1;
12081}
12082
12083/**
12084 * lpfc_sli_pcimem_bcopy - SLI memory copy function
12085 * @srcp: Source memory pointer.
12086 * @destp: Destination memory pointer.
12087 * @cnt: Number of bytes to copy; the copy proceeds one 32-bit word at a time.
12088 *
12089 * This function is used for copying data between driver memory
12090 * and the SLI memory. This function also changes the endianness
12091 * of each word if native endianness is different from SLI
12092 * endianness. This function can be called with or without
12093 * a lock held.
12094 **/
12095void
12096lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12097{
12098 uint32_t *src = srcp;
12099 uint32_t *dest = destp;
12100 uint32_t ldata;
12101 int i;
12102
12103	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12104 ldata = *src;
12105 ldata = le32_to_cpu(ldata);
12106 *dest = ldata;
12107 src++;
12108 dest++;
12109 }
12110}
12111
12112
12113/**
12114 * lpfc_sli_bemem_bcopy - SLI memory copy function
12115 * @srcp: Source memory pointer.
12116 * @destp: Destination memory pointer.
12117 * @cnt: Number of bytes to copy; the copy proceeds one 32-bit word at a time.
12118 *
12119 * This function is used for copying data from a data structure
12120 * with big endian representation to local endianness.
12121 * This function can be called with or without a lock held.
12122 **/
12123void
12124lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12125{
12126 uint32_t *src = srcp;
12127 uint32_t *dest = destp;
12128 uint32_t ldata;
12129 int i;
12130
12131 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12132 ldata = *src;
12133 ldata = be32_to_cpu(ldata);
12134 *dest = ldata;
12135 src++;
12136 dest++;
12137 }
12138}
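/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * both copy helpers above take a byte count and advance one 32-bit word
 * per iteration; they differ only in the wire order assumed for the
 * source words. lpfc_sli_pcimem_bcopy() converts from little-endian SLI
 * order, lpfc_sli_bemem_bcopy() from big-endian order.
 */
#if 0	/* usage sketch only */
static void lpfc_example_sli_copies(struct lpfc_hba *phba, void *be_src,
				    void *cpu_dst, uint32_t nbytes)
{
	MAILBOX_t shadow;

	/* SLIM mailbox words are little-endian on the wire. */
	lpfc_sli_pcimem_bcopy(phba->mbox, &shadow, sizeof(MAILBOX_t));

	/* A big-endian firmware object uses the bemem variant instead. */
	lpfc_sli_bemem_bcopy(be_src, cpu_dst, nbytes);
}
#endif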
12139
12140/**
12141 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12142 * @phba: Pointer to HBA context object.
12143 * @pring: Pointer to driver SLI ring object.
12144 * @mp: Pointer to driver buffer object.
12145 *
12146 * This function is called with no lock held.
12147 * It always returns zero after adding the buffer to the postbufq
12148 * buffer list.
12149 **/
12150int
12151lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12152 struct lpfc_dmabuf *mp)
12153{
12154	/* Stick struct lpfc_dmabuf at end of postbufq so the driver
12155	 * can look it up later. */
12156 spin_lock_irq(&phba->hbalock);
12157 list_add_tail(&mp->list, &pring->postbufq);
12158 pring->postbufq_cnt++;
12159 spin_unlock_irq(&phba->hbalock);
12160 return 0;
12161}
12162
12163/**
12164 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12165 * @phba: Pointer to HBA context object.
12166 *
12167 * When HBQ is enabled, buffers are searched based on tags. This function
12168 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
12169 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12170 * does not conflict with tags of buffer posted for unsolicited events.
12171 * The function returns the allocated tag. The function is called with
12172 * no locks held.
12173 **/
12174uint32_t
12175lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12176{
12177 spin_lock_irq(&phba->hbalock);
12178 phba->buffer_tag_count++;
12179 /*
12180	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
12181	 * a tag assigned by HBQ.
12182 */
12183 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12184 spin_unlock_irq(&phba->hbalock);
12185 return phba->buffer_tag_count;
12186}
12187
12188/**
12189 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12190 * @phba: Pointer to HBA context object.
12191 * @pring: Pointer to driver SLI ring object.
12192 * @tag: Buffer tag.
12193 *
12194 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12195 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
12196 * iocb is posted to the response ring with the tag of the buffer.
12197 * This function searches the pring->postbufq list using the tag
12198 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12199 * iocb. If the buffer is found, the lpfc_dmabuf object of the
12200 * buffer is returned to the caller; otherwise NULL is returned.
12201 * This function is called with no lock held.
12202 **/
12203struct lpfc_dmabuf *
12204lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12205 uint32_t tag)
12206{
12207 struct lpfc_dmabuf *mp, *next_mp;
12208 struct list_head *slp = &pring->postbufq;
12209
12210 /* Search postbufq, from the beginning, looking for a match on tag */
12211 spin_lock_irq(&phba->hbalock);
12212 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12213 if (mp->buffer_tag == tag) {
12214 list_del_init(&mp->list);
12215 pring->postbufq_cnt--;
12216 spin_unlock_irq(&phba->hbalock);
12217 return mp;
12218 }
12219 }
12220
12221 spin_unlock_irq(&phba->hbalock);
12222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12223 "0402 Cannot find virtual addr for buffer tag on "
12224 "ring %d Data x%lx x%px x%px x%x\n",
12225 pring->ringno, (unsigned long) tag,
12226 slp->next, slp->prev, pring->postbufq_cnt);
12227
12228 return NULL;
12229}
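/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * the tag round trip described above. The producer allocates a
 * QUE_BUFTAG_BIT tag, records it in the lpfc_dmabuf and parks the buffer
 * on the ring's postbufq; the response handler later recovers the same
 * buffer from the tag carried by the CMD_IOCB_RET_XRI64_CX iocb.
 */
#if 0	/* usage sketch only */
static struct lpfc_dmabuf *
lpfc_example_tag_roundtrip(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring)
{
	struct lpfc_dmabuf *mp;

	mp = kzalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		return NULL;
	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
	if (!mp->virt) {
		kfree(mp);
		return NULL;
	}
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ...later, from the response-ring handler, by tag: */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}
#endif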
12230
12231/**
12232 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12233 * @phba: Pointer to HBA context object.
12234 * @pring: Pointer to driver SLI ring object.
12235 * @phys: DMA address of the buffer.
12236 *
12237 * This function searches the buffer list using the dma_address
12238 * of unsolicited event to find the driver's lpfc_dmabuf object
12239 * corresponding to the dma_address. The function returns the
12240 * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
12241 * This function is called by the ct and els unsolicited event
12242 * handlers to get the buffer associated with the unsolicited
12243 * event.
12244 *
12245 * This function is called with no lock held.
12246 **/
12247struct lpfc_dmabuf *
12248lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12249 dma_addr_t phys)
12250{
12251 struct lpfc_dmabuf *mp, *next_mp;
12252 struct list_head *slp = &pring->postbufq;
12253
12254 /* Search postbufq, from the beginning, looking for a match on phys */
12255 spin_lock_irq(&phba->hbalock);
12256 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12257 if (mp->phys == phys) {
12258 list_del_init(&mp->list);
12259 pring->postbufq_cnt--;
12260 spin_unlock_irq(&phba->hbalock);
12261 return mp;
12262 }
12263 }
12264
12265 spin_unlock_irq(&phba->hbalock);
12266 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12267 "0410 Cannot find virtual addr for mapped buf on "
12268 "ring %d Data x%llx x%px x%px x%x\n",
12269 pring->ringno, (unsigned long long)phys,
12270 slp->next, slp->prev, pring->postbufq_cnt);
12271 return NULL;
12272}
12273
12274/**
12275 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12276 * @phba: Pointer to HBA context object.
12277 * @cmdiocb: Pointer to driver command iocb object.
12278 * @rspiocb: Pointer to driver response iocb object.
12279 *
12280 * This function is the completion handler for the abort iocbs for
12281 * ELS commands. This function is called from the ELS ring event
12282 * handler with no lock held. This function frees memory resources
12283 * associated with the abort iocb.
12284 **/
12285static void
12286lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12287 struct lpfc_iocbq *rspiocb)
12288{
12289 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12290 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12291 u8 cmnd = get_job_cmnd(phba, cmdiocb);
12292
12293 if (ulp_status) {
12294 /*
12295 * Assume that the port already completed and returned, or
12296 * will return the iocb. Just Log the message.
12297 */
12298 if (phba->sli_rev < LPFC_SLI_REV4) {
12299 if (cmnd == CMD_ABORT_XRI_CX &&
12300 ulp_status == IOSTAT_LOCAL_REJECT &&
12301 ulp_word4 == IOERR_ABORT_REQUESTED) {
12302 goto release_iocb;
12303 }
12304 }
12305 }
12306
12307 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
12308 "0327 Abort els iocb complete x%px with io cmd xri %x "
12309 "abort tag x%x abort status %x abort code %x\n",
12310 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12311 (phba->sli_rev == LPFC_SLI_REV4) ?
12312 get_wqe_reqtag(cmdiocb) :
12313 cmdiocb->iocb.ulpIoTag,
12314 ulp_status, ulp_word4);
12315release_iocb:
12316 lpfc_sli_release_iocbq(phba, cmdiocb);
12317 return;
12318}
12319
12320/**
12321 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12322 * @phba: Pointer to HBA context object.
12323 * @cmdiocb: Pointer to driver command iocb object.
12324 * @rspiocb: Pointer to driver response iocb object.
12325 *
12326 * The function is called from SLI ring event handler with no
12327 * lock held. This function is the completion handler for ELS commands
12328 * which are aborted. The function frees memory resources used for
12329 * the aborted ELS commands.
12330 **/
12331void
12332lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12333 struct lpfc_iocbq *rspiocb)
12334{
12335 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12336 IOCB_t *irsp;
12337 LPFC_MBOXQ_t *mbox;
12338 u32 ulp_command, ulp_status, ulp_word4, iotag;
12339
12340 ulp_command = get_job_cmnd(phba, cmdiocb);
12341 ulp_status = get_job_ulpstatus(phba, rspiocb);
12342 ulp_word4 = get_job_word4(phba, rspiocb);
12343
12344 if (phba->sli_rev == LPFC_SLI_REV4) {
12345 iotag = get_wqe_reqtag(cmdiocb);
12346 } else {
12347 irsp = &rspiocb->iocb;
12348 iotag = irsp->ulpIoTag;
12349
12350 /* It is possible a PLOGI_RJT for NPIV ports to get aborted.
12351 * The MBX_REG_LOGIN64 mbox command is freed back to the
12352 * mbox_mem_pool here.
12353 */
12354 if (cmdiocb->context_un.mbox) {
12355 mbox = cmdiocb->context_un.mbox;
12356 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12357 cmdiocb->context_un.mbox = NULL;
12358 }
12359 }
12360
12361 /* ELS cmd tag <ulpIoTag> completes */
12362 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12363 "0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
12364 "x%x x%x x%x x%px\n",
12365 ulp_command, kref_read(&cmdiocb->ndlp->kref),
12366 ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
12367 /*
12368 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12369 * if exchange is busy.
12370 */
12371 if (ulp_command == CMD_GEN_REQUEST64_CR)
12372 lpfc_ct_free_iocb(phba, cmdiocb);
12373 else
12374 lpfc_els_free_iocb(phba, cmdiocb);
12375
12376 lpfc_nlp_put(ndlp);
12377}
12378
12379/**
12380 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12381 * @phba: Pointer to HBA context object.
12382 * @pring: Pointer to driver SLI ring object.
12383 * @cmdiocb: Pointer to driver command iocb object.
12384 * @cmpl: completion function.
12385 *
12386 * This function issues an abort iocb for the provided command iocb. In case
12387 * of unloading, the abort iocb will not be issued to commands on the ELS
12388 * ring. Instead, the completion callback is changed for those commands
12389 * so that nothing happens when they finish. This function is called with
12390 * hbalock held and no ring_lock held (SLI4). The function returns
12391 * IOCB_ABORTING when the command iocb is an abort request or already aborting.
12392 *
12393 **/
12394int
12395lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12396 struct lpfc_iocbq *cmdiocb, void *cmpl)
12397{
12398 struct lpfc_vport *vport = cmdiocb->vport;
12399 struct lpfc_iocbq *abtsiocbp;
12400 int retval = IOCB_ERROR;
12401 unsigned long iflags;
12402 struct lpfc_nodelist *ndlp = NULL;
12403 u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12404 u16 ulp_context, iotag;
12405 bool ia;
12406
12407 /*
12408 * There are certain command types we don't want to abort. And we
12409 * don't want to abort commands that are already in the process of
12410 * being aborted.
12411 */
12412 if (ulp_command == CMD_ABORT_XRI_WQE ||
12413 ulp_command == CMD_ABORT_XRI_CN ||
12414 ulp_command == CMD_CLOSE_XRI_CN ||
12415 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12416 return IOCB_ABORTING;
12417
12418 if (!pring) {
12419 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12420 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12421 else
12422 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12423 return retval;
12424 }
12425
12426 /*
12427 * If we're unloading, don't abort iocb on the ELS ring, but change
12428 * the callback so that nothing happens when it finishes.
12429 */
12430 if (test_bit(FC_UNLOADING, &vport->load_flag) &&
12431 pring->ringno == LPFC_ELS_RING) {
12432 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12433 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12434 else
12435 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12436 return retval;
12437 }
12438
12439 /* issue ABTS for this IOCB based on iotag */
12440 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12441 if (abtsiocbp == NULL)
12442 return IOCB_NORESOURCE;
12443
12444 /* This signals the response to set the correct status
12445 * before calling the completion handler
12446 */
12447 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12448
12449 if (phba->sli_rev == LPFC_SLI_REV4) {
12450 ulp_context = cmdiocb->sli4_xritag;
12451 iotag = abtsiocbp->iotag;
12452 } else {
12453 iotag = cmdiocb->iocb.ulpIoTag;
12454 if (pring->ringno == LPFC_ELS_RING) {
12455 ndlp = cmdiocb->ndlp;
12456 ulp_context = ndlp->nlp_rpi;
12457 } else {
12458 ulp_context = cmdiocb->iocb.ulpContext;
12459 }
12460 }
12461
12462 /* Just close the exchange under certain conditions. */
12463 if (test_bit(FC_UNLOADING, &vport->load_flag) ||
12464 phba->link_state < LPFC_LINK_UP ||
12465 (phba->sli_rev == LPFC_SLI_REV4 &&
12466 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12467 (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12468 ia = true;
12469 else
12470 ia = false;
12471
12472 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12473 cmdiocb->iocb.ulpClass,
12474 LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12475
12476 abtsiocbp->vport = vport;
12477
12478 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12479 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12480 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12481 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12482
12483 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12484 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12485
12486 if (cmpl)
12487 abtsiocbp->cmd_cmpl = cmpl;
12488 else
12489 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12490 abtsiocbp->vport = vport;
12491
12492 if (phba->sli_rev == LPFC_SLI_REV4) {
12493 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12494 if (unlikely(pring == NULL))
12495 goto abort_iotag_exit;
12496		/* Note: both hbalock and ring_lock need to be held here */
12497 spin_lock_irqsave(&pring->ring_lock, iflags);
12498 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12499 abtsiocbp, 0);
12500 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12501 } else {
12502 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12503 abtsiocbp, 0);
12504 }
12505
12506abort_iotag_exit:
12507
12508 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12509 "0339 Abort IO XRI x%x, Original iotag x%x, "
12510 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12511 "retval x%x : IA %d cmd_cmpl %ps\n",
12512 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12513 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12514 retval, ia, abtsiocbp->cmd_cmpl);
12515 if (retval) {
12516 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12517 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12518 }
12519
12520 /*
12521 * Caller to this routine should check for IOCB_ERROR
12522	 * and handle it properly. This routine no longer removes the
12523	 * iocb from the txcmplq or calls the completion in case of IOCB_ERROR.
12524 */
12525 return retval;
12526}
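/*
 * Editor's illustration (hypothetical, not part of the driver): the
 * locking contract stated above. The caller takes only hbalock; the
 * routine acquires the SLI4 ring_lock itself. A NULL @cmpl selects the
 * default lpfc_sli_abort_els_cmpl completion handler.
 */
#if 0	/* usage sketch only */
static void lpfc_example_abort_one(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocbq, NULL);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* IOCB_ABORTING means an abort was already in flight. */
	if (rc != IOCB_SUCCESS && rc != IOCB_ABORTING)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"abort not issued, rc x%x\n", rc);
}
#endif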
12527
12528/**
12529 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12530 * @phba: pointer to lpfc HBA data structure.
12531 *
12532 * This routine will abort all pending and outstanding iocbs to an HBA.
12533 **/
12534void
12535lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12536{
12537 struct lpfc_sli *psli = &phba->sli;
12538 struct lpfc_sli_ring *pring;
12539 struct lpfc_queue *qp = NULL;
12540 int i;
12541
12542 if (phba->sli_rev != LPFC_SLI_REV4) {
12543 for (i = 0; i < psli->num_rings; i++) {
12544 pring = &psli->sli3_ring[i];
12545 lpfc_sli_abort_iocb_ring(phba, pring);
12546 }
12547 return;
12548 }
12549 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12550 pring = qp->pring;
12551 if (!pring)
12552 continue;
12553 lpfc_sli_abort_iocb_ring(phba, pring);
12554 }
12555}
12556
12557/**
12558 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12559 * @iocbq: Pointer to iocb object.
12560 * @vport: Pointer to driver virtual port object.
12561 *
12562 * This function acts as an iocb filter for functions which abort FCP iocbs.
12563 *
12564 * Return values
12565 * -ENODEV, if a null iocb or vport ptr is encountered
12566 * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12567 * already marked as being aborted by the driver, or is an abort iocb itself
12568 * 0, passes criteria for aborting the FCP I/O iocb
12569 **/
12570static int
12571lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12572 struct lpfc_vport *vport)
12573{
12574 u8 ulp_command;
12575
12576 /* No null ptr vports */
12577 if (!iocbq || iocbq->vport != vport)
12578 return -ENODEV;
12579
12580 /* iocb must be for FCP IO, already exists on the TX cmpl queue,
12581 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12582 */
12583 ulp_command = get_job_cmnd(vport->phba, iocbq);
12584 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12585 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12586 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12587 (ulp_command == CMD_ABORT_XRI_CN ||
12588 ulp_command == CMD_CLOSE_XRI_CN ||
12589 ulp_command == CMD_ABORT_XRI_WQE))
12590 return -EINVAL;
12591
12592 return 0;
12593}
12594
12595/**
12596 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12597 * @iocbq: Pointer to driver iocb object.
12598 * @vport: Pointer to driver virtual port object.
12599 * @tgt_id: SCSI ID of the target.
12600 * @lun_id: LUN ID of the scsi device.
12601 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12602 *
12603 * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12604 * host.
12605 *
12606 * It will return
12607 * 0 if the filtering criteria are met for the given iocb and will return
12608 * 1 if the filtering criteria are not met.
12609 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12610 * given iocb is for the SCSI device specified by vport, tgt_id and
12611 * lun_id parameter.
12612 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
12613 * given iocb is for the SCSI target specified by vport and tgt_id
12614 * parameters.
12615 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12616 * given iocb is for the SCSI host associated with the given vport.
12617 * This function is called with no locks held.
12618 **/
12619static int
12620lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12621 uint16_t tgt_id, uint64_t lun_id,
12622 lpfc_ctx_cmd ctx_cmd)
12623{
12624 struct lpfc_io_buf *lpfc_cmd;
12625 int rc = 1;
12626
12627 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12628
12629 if (lpfc_cmd->pCmd == NULL)
12630 return rc;
12631
12632 switch (ctx_cmd) {
12633 case LPFC_CTX_LUN:
12634 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12635 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12636 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12637 rc = 0;
12638 break;
12639 case LPFC_CTX_TGT:
12640 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12641 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12642 rc = 0;
12643 break;
12644 case LPFC_CTX_HOST:
12645 rc = 0;
12646 break;
12647 default:
12648 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12649 __func__, ctx_cmd);
12650 break;
12651 }
12652
12653 return rc;
12654}
12655
12656/**
12657 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12658 * @vport: Pointer to virtual port.
12659 * @tgt_id: SCSI ID of the target.
12660 * @lun_id: LUN ID of the scsi device.
12661 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12662 *
12663 * This function returns number of FCP commands pending for the vport.
12664 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12665 * commands pending on the vport associated with SCSI device specified
12666 * by tgt_id and lun_id parameters.
12667 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12668 * commands pending on the vport associated with SCSI target specified
12669 * by tgt_id parameter.
12670 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12671 * commands pending on the vport.
12672 * This function returns the number of iocbs which satisfy the filter.
12673 * This function is called without any lock held.
12674 **/
12675int
12676lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12677 lpfc_ctx_cmd ctx_cmd)
12678{
12679 struct lpfc_hba *phba = vport->phba;
12680 struct lpfc_iocbq *iocbq;
12681 int sum, i;
12682 unsigned long iflags;
12683 u8 ulp_command;
12684
12685 spin_lock_irqsave(&phba->hbalock, iflags);
12686 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12687 iocbq = phba->sli.iocbq_lookup[i];
12688
12689 if (!iocbq || iocbq->vport != vport)
12690 continue;
12691 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12692 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12693 continue;
12694
12695 /* Include counting outstanding aborts */
12696 ulp_command = get_job_cmnd(phba, iocbq);
12697 if (ulp_command == CMD_ABORT_XRI_CN ||
12698 ulp_command == CMD_CLOSE_XRI_CN ||
12699 ulp_command == CMD_ABORT_XRI_WQE) {
12700 sum++;
12701 continue;
12702 }
12703
12704 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12705 ctx_cmd) == 0)
12706 sum++;
12707 }
12708 spin_unlock_irqrestore(&phba->hbalock, iflags);
12709
12710 return sum;
12711}
12712
12713/**
12714 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12715 * @phba: Pointer to HBA context object
12716 * @cmdiocb: Pointer to command iocb object.
12717 * @rspiocb: Pointer to response iocb object.
12718 *
12719 * This function is called when an aborted FCP iocb completes. This
12720 * function is called by the ring event handler with no lock held.
12721 * This function frees the iocb.
12722 **/
12723void
12724lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12725 struct lpfc_iocbq *rspiocb)
12726{
12727 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12728 "3096 ABORT_XRI_CX completing on rpi x%x "
12729 "original iotag x%x, abort cmd iotag x%x "
12730 "status 0x%x, reason 0x%x\n",
12731 (phba->sli_rev == LPFC_SLI_REV4) ?
12732 cmdiocb->sli4_xritag :
12733 cmdiocb->iocb.un.acxri.abortContextTag,
12734 get_job_abtsiotag(phba, cmdiocb),
12735 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12736 get_job_word4(phba, rspiocb));
12737 lpfc_sli_release_iocbq(phba, cmdiocb);
12738 return;
12739}
12740
12741/**
12742 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12743 * @vport: Pointer to virtual port.
12744 * @tgt_id: SCSI ID of the target.
12745 * @lun_id: LUN ID of the scsi device.
12746 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12747 *
12748 * This function sends an abort command for every SCSI command
12749 * associated with the given virtual port pending on the ring
12750 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12751 * lpfc_sli_validate_fcp_iocb. The ordering for validation before
12752 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12753 * followed by lpfc_sli_validate_fcp_iocb.
12754 *
12755 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12756 * FCP iocbs associated with lun specified by tgt_id and lun_id
12757 * parameters
12758 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12759 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12760 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12761 * FCP iocbs associated with virtual port.
12762 * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12763 * lpfc_sli4_calc_ring is used.
12764 * This function returns the number of iocbs it failed to abort.
12765 * This function is called with no locks held.
12766 **/
12767int
12768lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12769 lpfc_ctx_cmd abort_cmd)
12770{
12771 struct lpfc_hba *phba = vport->phba;
12772 struct lpfc_sli_ring *pring = NULL;
12773 struct lpfc_iocbq *iocbq;
12774 int errcnt = 0, ret_val = 0;
12775 unsigned long iflags;
12776 int i;
12777
12778 /* all I/Os are in process of being flushed */
12779 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12780 return errcnt;
12781
12782 for (i = 1; i <= phba->sli.last_iotag; i++) {
12783 iocbq = phba->sli.iocbq_lookup[i];
12784
12785 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12786 continue;
12787
12788 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12789 abort_cmd) != 0)
12790 continue;
12791
12792 spin_lock_irqsave(&phba->hbalock, iflags);
12793 if (phba->sli_rev == LPFC_SLI_REV3) {
12794 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12795 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12796 pring = lpfc_sli4_calc_ring(phba, iocbq);
12797 }
12798 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12799 lpfc_sli_abort_fcp_cmpl);
12800 spin_unlock_irqrestore(&phba->hbalock, iflags);
12801 if (ret_val != IOCB_SUCCESS)
12802 errcnt++;
12803 }
12804
12805 return errcnt;
12806}
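/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * a LUN-reset style flush combining lpfc_sli_abort_iocb() above with
 * lpfc_sli_sum_iocb(): abort everything outstanding to one LUN, then
 * poll until the pending count drains or a bounded wait expires.
 */
#if 0	/* usage sketch only */
static int lpfc_example_flush_lun(struct lpfc_vport *vport,
				  u16 tgt_id, u64 lun_id)
{
	unsigned long later = msecs_to_jiffies(10000) + jiffies;

	lpfc_sli_abort_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
	while (time_after(later, jiffies) &&
	       lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
		msleep(20);

	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) ?
		-EBUSY : 0;
}
#endif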
12807
12808/**
12809 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12810 * @vport: Pointer to virtual port.
12811 * @pring: Pointer to driver SLI ring object.
12812 * @tgt_id: SCSI ID of the target.
12813 * @lun_id: LUN ID of the scsi device.
12814 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12815 *
12816 * This function sends an abort command for every SCSI command
12817 * associated with the given virtual port pending on the ring
12818 * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12819 * lpfc_sli_validate_fcp_iocb. The ordering for validation before
12820 * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12821 * followed by lpfc_sli_validate_fcp_iocb.
12822 *
12823 * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12824 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12825 * parameters.
12826 * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12827 * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12828 * When cmd == LPFC_CTX_HOST, the function sends abort to all
12829 * FCP iocbs associated with the virtual port.
12830 * This function returns the number of iocbs it aborted.
12831 * This function is called with no locks held right after a taskmgmt
12832 * command is sent.
12833 **/
12834int
12835lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12836 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12837{
12838 struct lpfc_hba *phba = vport->phba;
12839 struct lpfc_io_buf *lpfc_cmd;
12840 struct lpfc_iocbq *abtsiocbq;
12841 struct lpfc_nodelist *ndlp = NULL;
12842 struct lpfc_iocbq *iocbq;
12843 int sum, i, ret_val;
12844 unsigned long iflags;
12845 struct lpfc_sli_ring *pring_s4 = NULL;
12846 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12847 bool ia;
12848
12849 /* all I/Os are in process of being flushed */
12850 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12851 return 0;
12852
12853 sum = 0;
12854
12855 spin_lock_irqsave(&phba->hbalock, iflags);
12856 for (i = 1; i <= phba->sli.last_iotag; i++) {
12857 iocbq = phba->sli.iocbq_lookup[i];
12858
12859 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12860 continue;
12861
12862 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12863 cmd) != 0)
12864 continue;
12865
12866		/* Guard against IO completion being called at the same time */
12867 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12868 spin_lock(&lpfc_cmd->buf_lock);
12869
12870 if (!lpfc_cmd->pCmd) {
12871 spin_unlock(&lpfc_cmd->buf_lock);
12872 continue;
12873 }
12874
12875 if (phba->sli_rev == LPFC_SLI_REV4) {
12876 pring_s4 =
12877 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12878 if (!pring_s4) {
12879 spin_unlock(&lpfc_cmd->buf_lock);
12880 continue;
12881 }
12882			/* Note: both hbalock and ring_lock must be held here */
12883 spin_lock(&pring_s4->ring_lock);
12884 }
12885
12886 /*
12887 * If the iocbq is already being aborted, don't take a second
12888 * action, but do count it.
12889 */
12890 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12891 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12892 if (phba->sli_rev == LPFC_SLI_REV4)
12893 spin_unlock(&pring_s4->ring_lock);
12894 spin_unlock(&lpfc_cmd->buf_lock);
12895 continue;
12896 }
12897
12898 /* issue ABTS for this IOCB based on iotag */
12899 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12900 if (!abtsiocbq) {
12901 if (phba->sli_rev == LPFC_SLI_REV4)
12902 spin_unlock(&pring_s4->ring_lock);
12903 spin_unlock(&lpfc_cmd->buf_lock);
12904 continue;
12905 }
12906
12907 if (phba->sli_rev == LPFC_SLI_REV4) {
12908 iotag = abtsiocbq->iotag;
12909 ulp_context = iocbq->sli4_xritag;
12910 cqid = lpfc_cmd->hdwq->io_cq_map;
12911 } else {
12912 iotag = iocbq->iocb.ulpIoTag;
12913 if (pring->ringno == LPFC_ELS_RING) {
12914 ndlp = iocbq->ndlp;
12915 ulp_context = ndlp->nlp_rpi;
12916 } else {
12917 ulp_context = iocbq->iocb.ulpContext;
12918 }
12919 }
12920
12921 ndlp = lpfc_cmd->rdata->pnode;
12922
12923 if (lpfc_is_link_up(phba) &&
12924 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12925 !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12926 ia = false;
12927 else
12928 ia = true;
12929
12930 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12931 iocbq->iocb.ulpClass, cqid,
12932 ia, false);
12933
12934 abtsiocbq->vport = vport;
12935
12936 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12937 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12938 if (iocbq->cmd_flag & LPFC_IO_FCP)
12939 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12940 if (iocbq->cmd_flag & LPFC_IO_FOF)
12941 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12942
12943 /* Setup callback routine and issue the command. */
12944 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12945
12946 /*
12947 * Indicate the IO is being aborted by the driver and set
12948 * the caller's flag into the aborted IO.
12949 */
12950 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12951
12952 if (phba->sli_rev == LPFC_SLI_REV4) {
12953 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12954 abtsiocbq, 0);
12955 spin_unlock(&pring_s4->ring_lock);
12956 } else {
12957 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12958 abtsiocbq, 0);
12959 }
12960
12961 spin_unlock(&lpfc_cmd->buf_lock);
12962
12963 if (ret_val == IOCB_ERROR)
12964 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12965 else
12966 sum++;
12967 }
12968 spin_unlock_irqrestore(&phba->hbalock, iflags);
12969 return sum;
12970}
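/*
 * Editor's note (summary, not upstream text): the abort loop above nests
 * its locks strictly as hbalock -> lpfc_cmd->buf_lock ->
 * pring_s4->ring_lock (SLI4 only) and releases them in reverse order on
 * every continue path, which is why each early exit drops ring_lock
 * before buf_lock while hbalock stays held for the whole scan of the
 * iocbq_lookup array.
 */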
12971
12972/**
12973 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12974 * @phba: Pointer to HBA context object.
12975 * @cmdiocbq: Pointer to command iocb.
12976 * @rspiocbq: Pointer to response iocb.
12977 *
12978 * This function is the completion handler for iocbs issued using
12979 * lpfc_sli_issue_iocb_wait function. This function is called by the
12980 * ring event handler function without any lock held. This function
12981 * can be called from both worker thread context and interrupt
12982 * context. This function also can be called from other thread which
12983 * cleans up the SLI layer objects.
12984 * This function copies the contents of the response iocb to the
12985 * response iocb memory object provided by the caller of
12986 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12987 * sleeps for the iocb completion.
12988 **/
12989static void
12990lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12991 struct lpfc_iocbq *cmdiocbq,
12992 struct lpfc_iocbq *rspiocbq)
12993{
12994 wait_queue_head_t *pdone_q;
12995 unsigned long iflags;
12996 struct lpfc_io_buf *lpfc_cmd;
12997 size_t offset = offsetof(struct lpfc_iocbq, wqe);
12998
12999 spin_lock_irqsave(&phba->hbalock, iflags);
13000 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
13001
13002 /*
13003 * A time out has occurred for the iocb. If a time out
13004 * completion handler has been supplied, call it. Otherwise,
13005 * just free the iocbq.
13006 */
13007
13008 spin_unlock_irqrestore(&phba->hbalock, iflags);
13009 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
13010 cmdiocbq->wait_cmd_cmpl = NULL;
13011 if (cmdiocbq->cmd_cmpl)
13012 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13013 else
13014 lpfc_sli_release_iocbq(phba, cmdiocbq);
13015 return;
13016 }
13017
13018 /* Copy the contents of the local rspiocb into the caller's buffer. */
13019 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13020 if (cmdiocbq->rsp_iocb && rspiocbq)
13021 memcpy((char *)cmdiocbq->rsp_iocb + offset,
13022 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13023
13024 /* Set the exchange busy flag for task management commands */
13025 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13026 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13027 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13028 cur_iocbq);
13029 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13030 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13031 else
13032 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13033 }
13034
13035 pdone_q = cmdiocbq->context_un.wait_queue;
13036 if (pdone_q)
13037 wake_up(pdone_q);
13038 spin_unlock_irqrestore(&phba->hbalock, iflags);
13039 return;
13040}
13041
13042/**
13043 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13044 * @phba: Pointer to HBA context object.
13045 * @piocbq: Pointer to command iocb.
13046 * @flag: Flag to test.
13047 *
13048 * This routine grabs the hbalock and then tests the cmd_flag to
13049 * see if the passed in flag is set.
13050 * Returns:
13051 * 1 if flag is set.
13052 * 0 if flag is not set.
13053 **/
13054static int
13055lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13056 struct lpfc_iocbq *piocbq, uint32_t flag)
13057{
13058 unsigned long iflags;
13059 int ret;
13060
13061 spin_lock_irqsave(&phba->hbalock, iflags);
13062 ret = piocbq->cmd_flag & flag;
13063 spin_unlock_irqrestore(&phba->hbalock, iflags);
13064 return ret;
13065
13066}
13067
13068/**
13069 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13070 * @phba: Pointer to HBA context object.
13071 * @ring_number: Ring number
13072 * @piocb: Pointer to command iocb.
13073 * @prspiocbq: Pointer to response iocb.
13074 * @timeout: Timeout in number of seconds.
13075 *
13076 * This function issues the iocb to firmware and waits for the
13077 * iocb to complete. The cmd_cmpl field of the iocb shall be used
13078 * to handle iocbs which time out. If the field is NULL, the
13079 * function shall free the iocbq structure. If more clean up is
13080 * needed, the caller is expected to provide a completion function
13081 * that will provide the needed clean up. If the iocb command is
13082 * not completed within timeout seconds, the function will either
13083 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13084 * completion function set in the cmd_cmpl field and then return
13085 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
13086 * resources if this function returns IOCB_TIMEDOUT.
13087 * The function waits for the iocb completion using a
13088 * non-interruptible wait.
13089 * This function will sleep while waiting for iocb completion.
13090 * So, this function should not be called from any context which
13091 * does not allow sleeping. Due to the same reason, this function
13092 * cannot be called with interrupt disabled.
13093 * This function assumes that the iocb completions occur while
13094 * this function sleeps. So, this function cannot be called from
13095 * the thread which processes iocb completion for this ring.
13096 * This function clears the cmd_flag of the iocb object before
13097 * issuing the iocb and the iocb completion handler sets this
13098 * flag and wakes this thread when the iocb completes.
13099 * The contents of the response iocb will be copied to prspiocbq
13100 * by the completion handler when the command completes.
13101 * This function returns IOCB_SUCCESS when success.
13102 * This function is called with no lock held.
13103 **/
13104int
13105lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13106 uint32_t ring_number,
13107 struct lpfc_iocbq *piocb,
13108 struct lpfc_iocbq *prspiocbq,
13109 uint32_t timeout)
13110{
13111 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13112 long timeleft, timeout_req = 0;
13113 int retval = IOCB_SUCCESS;
13114 uint32_t creg_val;
13115 struct lpfc_iocbq *iocb;
13116 int txq_cnt = 0;
13117 int txcmplq_cnt = 0;
13118 struct lpfc_sli_ring *pring;
13119 unsigned long iflags;
13120 bool iocb_completed = true;
13121
13122 if (phba->sli_rev >= LPFC_SLI_REV4) {
13123 lpfc_sli_prep_wqe(phba, piocb);
13124
13125 pring = lpfc_sli4_calc_ring(phba, piocb);
13126 } else
13127 pring = &phba->sli.sli3_ring[ring_number];
13128 /*
13129	 * If the caller has provided a response iocbq buffer, then the rsp_iocb
13130	 * field must still be NULL; otherwise it is an error.
13131 */
13132 if (prspiocbq) {
13133 if (piocb->rsp_iocb)
13134 return IOCB_ERROR;
13135 piocb->rsp_iocb = prspiocbq;
13136 }
13137
13138 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13139 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13140 piocb->context_un.wait_queue = &done_q;
13141 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13142
13143 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13144 if (lpfc_readl(phba->HCregaddr, &creg_val))
13145 return IOCB_ERROR;
13146 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13147 writel(creg_val, phba->HCregaddr);
13148 readl(phba->HCregaddr); /* flush */
13149 }
13150
13151 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13152 SLI_IOCB_RET_IOCB);
13153 if (retval == IOCB_SUCCESS) {
13154 timeout_req = msecs_to_jiffies(timeout * 1000);
13155 timeleft = wait_event_timeout(done_q,
13156 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13157 timeout_req);
13158 spin_lock_irqsave(&phba->hbalock, iflags);
13159 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13160
13161 /*
13162 * IOCB timed out. Inform the wake iocb wait
13163 * completion function and set local status
13164 */
13165
13166 iocb_completed = false;
13167 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13168 }
13169 spin_unlock_irqrestore(&phba->hbalock, iflags);
13170 if (iocb_completed) {
13171 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13172 "0331 IOCB wake signaled\n");
13173 /* Note: we are not indicating if the IOCB has a success
13174 * status or not - that's for the caller to check.
13175 * IOCB_SUCCESS means just that the command was sent and
13176 * completed. Not that it completed successfully.
13177			 */
13178 } else if (timeleft == 0) {
13179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13180 "0338 IOCB wait timeout error - no "
13181 "wake response Data x%x\n", timeout);
13182 retval = IOCB_TIMEDOUT;
13183 } else {
13184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13185 "0330 IOCB wake NOT set, "
13186 "Data x%x x%lx\n",
13187 timeout, (timeleft / jiffies));
13188 retval = IOCB_TIMEDOUT;
13189 }
13190 } else if (retval == IOCB_BUSY) {
13191 if (phba->cfg_log_verbose & LOG_SLI) {
13192 list_for_each_entry(iocb, &pring->txq, list) {
13193 txq_cnt++;
13194 }
13195 list_for_each_entry(iocb, &pring->txcmplq, list) {
13196 txcmplq_cnt++;
13197 }
13198 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13199 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13200 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13201 }
13202 return retval;
13203 } else {
13204 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13205 "0332 IOCB wait issue failed, Data x%x\n",
13206 retval);
13207 retval = IOCB_ERROR;
13208 }
13209
13210 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13211 if (lpfc_readl(phba->HCregaddr, &creg_val))
13212 return IOCB_ERROR;
13213 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13214 writel(creg_val, phba->HCregaddr);
13215 readl(phba->HCregaddr); /* flush */
13216 }
13217
13218 if (prspiocbq)
13219 piocb->rsp_iocb = NULL;
13220
13221 piocb->context_un.wait_queue = NULL;
13222 piocb->cmd_cmpl = NULL;
13223 return retval;
13224}
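/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * the synchronous-issue contract above. The caller-provided response
 * iocbq receives the completion contents; on IOCB_TIMEDOUT the command
 * iocb must not be freed here because the timeout path still owns it.
 */
#if 0	/* usage sketch only */
static int lpfc_example_issue_wait(struct lpfc_hba *phba,
				   struct lpfc_iocbq *piocb)
{
	struct lpfc_iocbq *rspiocbq;
	int rc;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq)
		return -ENOMEM;

	/* 30 seconds is an arbitrary timeout for this sketch. */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
				      rspiocbq, 30);
	if (rc == IOCB_TIMEDOUT) {
		/* piocb now belongs to the timeout path; leave it alone. */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		return -ETIMEDOUT;
	}
	lpfc_sli_release_iocbq(phba, rspiocbq);
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif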
13225
13226/**
13227 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13228 * @phba: Pointer to HBA context object.
13229 * @pmboxq: Pointer to driver mailbox object.
13230 * @timeout: Timeout in number of seconds.
13231 *
13232 * This function issues the mailbox to firmware and waits for the
13233 * mailbox command to complete. If the mailbox command is not
13234 * completed within timeout seconds, it returns MBX_TIMEOUT.
13235 * The function waits for the mailbox completion using an
13236 * interruptible wait. If the thread is woken up due to a
13237 * signal, MBX_TIMEOUT error is returned to the caller. Caller
13238 * should not free the mailbox resources, if this function returns
13239 * MBX_TIMEOUT.
13240 * This function will sleep while waiting for mailbox completion.
13241 * So, this function should not be called from any context which
13242 * does not allow sleeping. Due to the same reason, this function
13243 * cannot be called with interrupt disabled.
13244 * This function assumes that the mailbox completion occurs while
13245 * this function sleeps. So, this function cannot be called from
13246 * the worker thread which processes mailbox completion.
13247 * This function is called in the context of HBA management
13248 * applications.
13249 * This function returns MBX_SUCCESS when successful.
13250 * This function is called with no lock held.
13251 **/
13252int
13253lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13254 uint32_t timeout)
13255{
13256 struct completion mbox_done;
13257 int retval;
13258 unsigned long flag;
13259
13260 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13261 /* setup wake call as IOCB callback */
13262 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13263
13264 /* setup ctx_u field to pass wait_queue pointer to wake function */
13265 init_completion(&mbox_done);
13266 pmboxq->ctx_u.mbox_wait = &mbox_done;
13267 /* now issue the command */
13268 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13269 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13270 wait_for_completion_timeout(&mbox_done,
13271 msecs_to_jiffies(timeout * 1000));
13272
13273 spin_lock_irqsave(&phba->hbalock, flag);
13274 pmboxq->ctx_u.mbox_wait = NULL;
13275 /*
13276 * if LPFC_MBX_WAKE flag is set the mailbox is completed
13277 * else do not free the resources.
13278 */
13279 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13280 retval = MBX_SUCCESS;
13281 } else {
13282 retval = MBX_TIMEOUT;
13283 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13284 }
13285 spin_unlock_irqrestore(&phba->hbalock, flag);
13286 }
13287 return retval;
13288}
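/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * the MBX_TIMEOUT ownership rule above, using the existing READ_REV
 * mailbox setup helper. On MBX_TIMEOUT the firmware may still write the
 * mailbox, so the memory is deliberately not returned to the pool.
 */
#if 0	/* usage sketch only */
static int lpfc_example_mbox_wait(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* firmware still owns pmb */

	mempool_free(pmb, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif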
13289
13290/**
13291 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13292 * @phba: Pointer to HBA context.
13293 * @mbx_action: Mailbox shutdown options.
13294 *
13295 * This function is called to shutdown the driver's mailbox sub-system.
13296 * It first marks the mailbox sub-system as blocked to prevent
13297 * asynchronous mailbox commands from being issued off the pending mailbox
13298 * command queue. If the mailbox command sub-system shutdown is due to
13299 * HBA error conditions such as EEH or ERATT, this routine shall invoke
13300 * the mailbox sub-system flush routine to forcefully bring down the
13301 * mailbox sub-system. Otherwise, if it is due to normal condition (such
13302 * as with offline or HBA function reset), this routine will wait for the
13303 * outstanding mailbox command to complete before invoking the mailbox
13304 * sub-system flush routine to gracefully bring down mailbox sub-system.
13305 **/
13306void
13307lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13308{
13309 struct lpfc_sli *psli = &phba->sli;
13310 unsigned long timeout;
13311
13312 if (mbx_action == LPFC_MBX_NO_WAIT) {
13313 /* delay 100ms for port state */
13314 msleep(100);
13315 lpfc_sli_mbox_sys_flush(phba);
13316 return;
13317 }
13318 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13319
13320	/* Disable softirqs to prevent timers and others from obtaining phba->hbalock */
13321 local_bh_disable();
13322
13323 spin_lock_irq(&phba->hbalock);
13324 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13325
13326 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13327 /* Determine how long we might wait for the active mailbox
13328 * command to be gracefully completed by firmware.
13329 */
13330 if (phba->sli.mbox_active)
13331 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13332 phba->sli.mbox_active) *
13333 1000) + jiffies;
13334 spin_unlock_irq(&phba->hbalock);
13335
13336 /* Enable softirqs again, done with phba->hbalock */
13337 local_bh_enable();
13338
13339 while (phba->sli.mbox_active) {
13340 /* Check active mailbox complete status every 2ms */
13341 msleep(2);
13342 if (time_after(jiffies, timeout))
13343				/* Timeout, let the mailbox flush routine
13344				 * forcefully release the active mailbox command
13345 */
13346 break;
13347 }
13348 } else {
13349 spin_unlock_irq(&phba->hbalock);
13350
13351 /* Enable softirqs again, done with phba->hbalock */
13352 local_bh_enable();
13353 }
13354
13355 lpfc_sli_mbox_sys_flush(phba);
13356}
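/*
 * Editor's illustration (hypothetical, not part of the driver): picking
 * the shutdown mode described above. Error paths such as EEH cannot wait
 * on firmware, so they force the flush; a normal offline waits for the
 * active mailbox first.
 */
#if 0	/* usage sketch only */
static void lpfc_example_mbox_shutdown(struct lpfc_hba *phba)
{
	if (pci_channel_offline(phba->pcidev))
		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
	else
		lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
}
#endif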
13357
13358/**
13359 * lpfc_sli_eratt_read - read sli-3 error attention events
13360 * @phba: Pointer to HBA context.
13361 *
13362 * This function is called to read the SLI3 device error attention registers
13363 * for possible error attention events. The caller must hold the hbalock
13364 * with spin_lock_irq().
13365 *
13366 * This function returns 1 when there is Error Attention in the Host Attention
13367 * Register and returns 0 otherwise.
13368 **/
13369static int
13370lpfc_sli_eratt_read(struct lpfc_hba *phba)
13371{
13372 uint32_t ha_copy;
13373
13374 /* Read chip Host Attention (HA) register */
13375 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13376 goto unplug_err;
13377
13378 if (ha_copy & HA_ERATT) {
13379 /* Read host status register to retrieve error event */
13380 if (lpfc_sli_read_hs(phba))
13381 goto unplug_err;
13382
13383		/* Check if a deferred error condition is active */
13384 if ((HS_FFER1 & phba->work_hs) &&
13385 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13386 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13387 set_bit(DEFER_ERATT, &phba->hba_flag);
13388 /* Clear all interrupt enable conditions */
13389 writel(0, phba->HCregaddr);
13390 readl(phba->HCregaddr);
13391 }
13392
13393 /* Set the driver HA work bitmap */
13394 phba->work_ha |= HA_ERATT;
13395 /* Indicate polling handles this ERATT */
13396 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13397 return 1;
13398 }
13399 return 0;
13400
13401unplug_err:
13402 /* Set the driver HS work bitmap */
13403 phba->work_hs |= UNPLUG_ERR;
13404 /* Set the driver HA work bitmap */
13405 phba->work_ha |= HA_ERATT;
13406 /* Indicate polling handles this ERATT */
13407 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13408 return 1;
13409}
13410
13411/**
13412 * lpfc_sli4_eratt_read - read sli-4 error attention events
13413 * @phba: Pointer to HBA context.
13414 *
13415 * This function is called to read the SLI4 device error attention registers
13416 * for possible error attention events. The caller must hold the hbalock
13417 * with spin_lock_irq().
13418 *
13419 * This function returns 1 when there is Error Attention in the Host Attention
13420 * Register and returns 0 otherwise.
13421 **/
13422static int
13423lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13424{
13425 uint32_t uerr_sta_hi, uerr_sta_lo;
13426 uint32_t if_type, portsmphr;
13427 struct lpfc_register portstat_reg;
13428 u32 logmask;
13429
13430 /*
13431 * For now, use the SLI4 device internal unrecoverable error
13432 * registers for error attention. This can be changed later.
13433 */
13434 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13435 switch (if_type) {
13436 case LPFC_SLI_INTF_IF_TYPE_0:
13437 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13438 &uerr_sta_lo) ||
13439 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13440 &uerr_sta_hi)) {
13441 phba->work_hs |= UNPLUG_ERR;
13442 phba->work_ha |= HA_ERATT;
13443 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13444 return 1;
13445 }
13446 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13447 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13448 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13449 "1423 HBA Unrecoverable error: "
13450 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13451 "ue_mask_lo_reg=0x%x, "
13452 "ue_mask_hi_reg=0x%x\n",
13453 uerr_sta_lo, uerr_sta_hi,
13454 phba->sli4_hba.ue_mask_lo,
13455 phba->sli4_hba.ue_mask_hi);
13456 phba->work_status[0] = uerr_sta_lo;
13457 phba->work_status[1] = uerr_sta_hi;
13458 phba->work_ha |= HA_ERATT;
13459 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13460 return 1;
13461 }
13462 break;
13463 case LPFC_SLI_INTF_IF_TYPE_2:
13464 case LPFC_SLI_INTF_IF_TYPE_6:
13465 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13466 &portstat_reg.word0) ||
13467 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13468 &portsmphr)){
13469 phba->work_hs |= UNPLUG_ERR;
13470 phba->work_ha |= HA_ERATT;
13471 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13472 return 1;
13473 }
13474 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13475 phba->work_status[0] =
13476 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13477 phba->work_status[1] =
13478 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13479 logmask = LOG_TRACE_EVENT;
13480 if (phba->work_status[0] ==
13481 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13482 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13483 logmask = LOG_SLI;
13484 lpfc_printf_log(phba, KERN_ERR, logmask,
13485 "2885 Port Status Event: "
13486 "port status reg 0x%x, "
13487 "port smphr reg 0x%x, "
13488 "error 1=0x%x, error 2=0x%x\n",
13489 portstat_reg.word0,
13490 portsmphr,
13491 phba->work_status[0],
13492 phba->work_status[1]);
13493 phba->work_ha |= HA_ERATT;
13494 set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13495 return 1;
13496 }
13497 break;
13498 case LPFC_SLI_INTF_IF_TYPE_1:
13499 default:
13500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13501 "2886 HBA Error Attention on unsupported "
13502 "if type %d.", if_type);
13503 return 1;
13504 }
13505
13506 return 0;
13507}
13508
13509/**
13510 * lpfc_sli_check_eratt - check error attention events
13511 * @phba: Pointer to HBA context.
13512 *
13513 * This function is called from timer soft interrupt context to check HBA's
13514 * error attention register bit for error attention events.
13515 *
13516 * This function returns 1 when there is Error Attention in the Host Attention
13517 * Register and returns 0 otherwise.
13518 **/
13519int
13520lpfc_sli_check_eratt(struct lpfc_hba *phba)
13521{
13522 uint32_t ha_copy;
13523
13524 /* If somebody is waiting to handle an eratt, don't process it
13525 * here. The brdkill function will do this.
13526 */
13527 if (phba->link_flag & LS_IGNORE_ERATT)
13528 return 0;
13529
13530 /* Check if interrupt handler handles this ERATT */
13531 if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
13532 /* Interrupt handler has handled ERATT */
13533 return 0;
13534
13535 /*
13536 * If there is deferred error attention, do not check for error
13537 * attention
13538 */
13539 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13540 return 0;
13541
13542 spin_lock_irq(&phba->hbalock);
13543 /* If PCI channel is offline, don't process it */
13544 if (unlikely(pci_channel_offline(phba->pcidev))) {
13545 spin_unlock_irq(&phba->hbalock);
13546 return 0;
13547 }
13548
13549 switch (phba->sli_rev) {
13550 case LPFC_SLI_REV2:
13551 case LPFC_SLI_REV3:
13552 /* Read chip Host Attention (HA) register */
13553 ha_copy = lpfc_sli_eratt_read(phba);
13554 break;
13555 case LPFC_SLI_REV4:
13556		/* Read device Unrecoverable Error (UERR) registers */
13557 ha_copy = lpfc_sli4_eratt_read(phba);
13558 break;
13559 default:
13560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13561 "0299 Invalid SLI revision (%d)\n",
13562 phba->sli_rev);
13563 ha_copy = 0;
13564 break;
13565 }
13566 spin_unlock_irq(&phba->hbalock);
13567
13568 return ha_copy;
13569}
13570
13571/**
13572 * lpfc_intr_state_check - Check device state for interrupt handling
13573 * @phba: Pointer to HBA context.
13574 *
13575 * This inline routine checks whether a device or its PCI slot is in a state
13576 * that the interrupt should be handled.
13577 *
13578 * This function returns 0 if the device or the PCI slot is in a state that
13579 * interrupt should be handled, otherwise -EIO.
13580 */
13581static inline int
13582lpfc_intr_state_check(struct lpfc_hba *phba)
13583{
13584 /* If the pci channel is offline, ignore all the interrupts */
13585 if (unlikely(pci_channel_offline(phba->pcidev)))
13586 return -EIO;
13587
13588 /* Update device level interrupt statistics */
13589 phba->sli.slistat.sli_intr++;
13590
13591 /* Ignore all interrupts during initialization. */
13592 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13593 return -EIO;
13594
13595 return 0;
13596}
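/*
 * Editor's illustration (hypothetical, not part of the driver): the
 * guard pattern the interrupt handlers below open with, using the state
 * check above before touching any attention registers.
 */
#if 0	/* usage sketch only */
static irqreturn_t lpfc_example_isr(int irq, void *dev_id)
{
	struct lpfc_hba *phba = dev_id;

	if (unlikely(!phba) || unlikely(lpfc_intr_state_check(phba)))
		return IRQ_NONE;	/* offline slot or still initializing */

	/* ...read and acknowledge the attention source here... */
	return IRQ_HANDLED;
}
#endif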
13597
13598/**
13599 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13600 * @irq: Interrupt number.
13601 * @dev_id: The device context pointer.
13602 *
13603 * This function is directly called from the PCI layer as an interrupt
13604 * service routine when device with SLI-3 interface spec is enabled with
13605 * MSI-X multi-message interrupt mode and there are slow-path events in
13606 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13607 * interrupt mode, this function is called as part of the device-level
13608 * interrupt handler. When the PCI slot is in error recovery or the HBA
13609 * is undergoing initialization, the interrupt handler will not process
13610 * the interrupt. The link attention and ELS ring attention events are
13611 * handled by the worker thread. The interrupt handler signals the worker
13612 * thread and returns for these events. This function is called without
13613 * any lock held. It gets the hbalock to access and update SLI data
13614 * structures.
13615 *
13616 * This function returns IRQ_HANDLED when interrupt is handled else it
13617 * returns IRQ_NONE.
13618 **/
13619irqreturn_t
13620lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13621{
13622 struct lpfc_hba *phba;
13623 uint32_t ha_copy, hc_copy;
13624 uint32_t work_ha_copy;
13625 unsigned long status;
13626 unsigned long iflag;
13627 uint32_t control;
13628
13629 MAILBOX_t *mbox, *pmbox;
13630 struct lpfc_vport *vport;
13631 struct lpfc_nodelist *ndlp;
13632 struct lpfc_dmabuf *mp;
13633 LPFC_MBOXQ_t *pmb;
13634 int rc;
13635
13636 /*
13637 * Get the driver's phba structure from the dev_id and
13638 * assume the HBA is not interrupting.
13639 */
13640 phba = (struct lpfc_hba *)dev_id;
13641
13642 if (unlikely(!phba))
13643 return IRQ_NONE;
13644
13645 /*
13646 * Extra attention is needed when this function is invoked as an
13647 * individual interrupt handler in MSI-X multi-message interrupt mode.
13648 */
13649 if (phba->intr_type == MSIX) {
13650 /* Check device state for handling interrupt */
13651 if (lpfc_intr_state_check(phba))
13652 return IRQ_NONE;
13653 /* Need to read HA REG for slow-path events */
13654 spin_lock_irqsave(&phba->hbalock, iflag);
13655 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13656 goto unplug_error;
13657 /* If somebody is waiting to handle an eratt don't process it
13658 * here. The brdkill function will do this.
13659 */
13660 if (phba->link_flag & LS_IGNORE_ERATT)
13661 ha_copy &= ~HA_ERATT;
13662 /* Check the need for handling ERATT in interrupt handler */
13663 if (ha_copy & HA_ERATT) {
13664 if (test_and_set_bit(HBA_ERATT_HANDLED,
13665 &phba->hba_flag))
13666 /* ERATT polling has handled ERATT */
13667 ha_copy &= ~HA_ERATT;
13668 }
13669
13670 /*
13671 * If there is deferred error attention, do not check for any
13672 * interrupt.
13673 */
13674 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
13675 spin_unlock_irqrestore(&phba->hbalock, iflag);
13676 return IRQ_NONE;
13677 }
13678
13679 /* Clear only attention sources related to the slow path */
13680 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13681 goto unplug_error;
13682
13683 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13684 HC_LAINT_ENA | HC_ERINT_ENA),
13685 phba->HCregaddr);
13686 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13687 phba->HAregaddr);
13688 writel(hc_copy, phba->HCregaddr);
13689 readl(phba->HAregaddr); /* flush */
13690 spin_unlock_irqrestore(&phba->hbalock, iflag);
13691 } else
13692 ha_copy = phba->ha_copy;
13693
13694 work_ha_copy = ha_copy & phba->work_ha_mask;
13695
13696 if (work_ha_copy) {
13697 if (work_ha_copy & HA_LATT) {
13698 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13699 /*
13700 * Turn off Link Attention interrupts
13701 * until CLEAR_LA done
13702 */
13703 spin_lock_irqsave(&phba->hbalock, iflag);
13704 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13705 if (lpfc_readl(phba->HCregaddr, &control))
13706 goto unplug_error;
13707 control &= ~HC_LAINT_ENA;
13708 writel(control, phba->HCregaddr);
13709 readl(phba->HCregaddr); /* flush */
13710 spin_unlock_irqrestore(&phba->hbalock, iflag);
13711 }
13712 else
13713 work_ha_copy &= ~HA_LATT;
13714 }
13715
13716 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13717 /*
13718 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13719 * the only slow ring.
13720 */
13721 status = (work_ha_copy &
13722 (HA_RXMASK << (4*LPFC_ELS_RING)));
13723 status >>= (4*LPFC_ELS_RING);
13724 if (status & HA_RXMASK) {
13725 spin_lock_irqsave(&phba->hbalock, iflag);
13726 if (lpfc_readl(phba->HCregaddr, &control))
13727 goto unplug_error;
13728
13729 lpfc_debugfs_slow_ring_trc(phba,
13730 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13731 control, status,
13732 (uint32_t)phba->sli.slistat.sli_intr);
13733
13734 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13735 lpfc_debugfs_slow_ring_trc(phba,
13736 "ISR Disable ring:"
13737 "pwork:x%x hawork:x%x wait:x%x",
13738 phba->work_ha, work_ha_copy,
13739 (uint32_t)((unsigned long)
13740 &phba->work_waitq));
13741
13742 control &=
13743 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13744 writel(control, phba->HCregaddr);
13745 readl(phba->HCregaddr); /* flush */
13746 }
13747 else {
13748 lpfc_debugfs_slow_ring_trc(phba,
13749 "ISR slow ring: pwork:"
13750 "x%x hawork:x%x wait:x%x",
13751 phba->work_ha, work_ha_copy,
13752 (uint32_t)((unsigned long)
13753 &phba->work_waitq));
13754 }
13755 spin_unlock_irqrestore(&phba->hbalock, iflag);
13756 }
13757 }
13758 spin_lock_irqsave(&phba->hbalock, iflag);
13759 if (work_ha_copy & HA_ERATT) {
13760 if (lpfc_sli_read_hs(phba))
13761 goto unplug_error;
13762 /*
13763 * Check if a deferred error
13764 * condition is active
13765 */
13766 if ((HS_FFER1 & phba->work_hs) &&
13767 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13768 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13769 phba->work_hs)) {
13770 set_bit(DEFER_ERATT, &phba->hba_flag);
13771 /* Clear all interrupt enable conditions */
13772 writel(0, phba->HCregaddr);
13773 readl(phba->HCregaddr);
13774 }
13775 }
13776
13777 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13778 pmb = phba->sli.mbox_active;
13779 pmbox = &pmb->u.mb;
13780 mbox = phba->mbox;
13781 vport = pmb->vport;
13782
13783 /* First check out the status word */
13784 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13785 if (pmbox->mbxOwner != OWN_HOST) {
13786 spin_unlock_irqrestore(&phba->hbalock, iflag);
13787 /*
13788 * Stray Mailbox Interrupt, mbxCommand <cmd>
13789 * mbxStatus <status>
13790 */
13791 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13792 "(%d):0304 Stray Mailbox "
13793 "Interrupt mbxCommand x%x "
13794 "mbxStatus x%x\n",
13795 (vport ? vport->vpi : 0),
13796 pmbox->mbxCommand,
13797 pmbox->mbxStatus);
13798 /* clear mailbox attention bit */
13799 work_ha_copy &= ~HA_MBATT;
13800 } else {
13801 phba->sli.mbox_active = NULL;
13802 spin_unlock_irqrestore(&phba->hbalock, iflag);
13803 phba->last_completion_time = jiffies;
13804 del_timer(&phba->sli.mbox_tmo);
13805 if (pmb->mbox_cmpl) {
13806 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13807 MAILBOX_CMD_SIZE);
13808 if (pmb->out_ext_byte_len &&
13809 pmb->ext_buf)
13810 lpfc_sli_pcimem_bcopy(
13811 phba->mbox_ext,
13812 pmb->ext_buf,
13813 pmb->out_ext_byte_len);
13814 }
13815 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13816 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13817
13818 lpfc_debugfs_disc_trc(vport,
13819 LPFC_DISC_TRC_MBOX_VPORT,
13820 "MBOX dflt rpi: : "
13821 "status:x%x rpi:x%x",
13822 (uint32_t)pmbox->mbxStatus,
13823 pmbox->un.varWords[0], 0);
13824
13825 if (!pmbox->mbxStatus) {
13826 mp = pmb->ctx_buf;
13827 ndlp = pmb->ctx_ndlp;
13828
13829 /* Reg_LOGIN of dflt RPI was
13830 * successful. Now let's get
13831 * rid of the RPI using the
13832 * same mbox buffer.
13833 */
13834 lpfc_unreg_login(phba,
13835 vport->vpi,
13836 pmbox->un.varWords[0],
13837 pmb);
13838 pmb->mbox_cmpl =
13839 lpfc_mbx_cmpl_dflt_rpi;
13840 pmb->ctx_buf = mp;
13841 pmb->ctx_ndlp = ndlp;
13842 pmb->vport = vport;
13843 rc = lpfc_sli_issue_mbox(phba,
13844 pmb,
13845 MBX_NOWAIT);
13846 if (rc != MBX_BUSY)
13847 lpfc_printf_log(phba,
13848 KERN_ERR,
13849 LOG_TRACE_EVENT,
13850 "0350 rc should have"
13851 "been MBX_BUSY\n");
13852 if (rc != MBX_NOT_FINISHED)
13853 goto send_current_mbox;
13854 }
13855 }
13856 spin_lock_irqsave(
13857 &phba->pport->work_port_lock,
13858 iflag);
13859 phba->pport->work_port_events &=
13860 ~WORKER_MBOX_TMO;
13861 spin_unlock_irqrestore(
13862 &phba->pport->work_port_lock,
13863 iflag);
13864
13865 /* Do NOT queue MBX_HEARTBEAT to the worker
13866 * thread for processing.
13867 */
13868 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13869 /* Process mbox now */
13870 phba->sli.mbox_active = NULL;
13871 phba->sli.sli_flag &=
13872 ~LPFC_SLI_MBOX_ACTIVE;
13873 if (pmb->mbox_cmpl)
13874 pmb->mbox_cmpl(phba, pmb);
13875 } else {
13876 /* Queue to worker thread to process */
13877 lpfc_mbox_cmpl_put(phba, pmb);
13878 }
13879 }
13880 } else
13881 spin_unlock_irqrestore(&phba->hbalock, iflag);
13882
13883 if ((work_ha_copy & HA_MBATT) &&
13884 (phba->sli.mbox_active == NULL)) {
13885send_current_mbox:
13886 /* Process next mailbox command if there is one */
13887 do {
13888 rc = lpfc_sli_issue_mbox(phba, NULL,
13889 MBX_NOWAIT);
13890 } while (rc == MBX_NOT_FINISHED);
13891 if (rc != MBX_SUCCESS)
13892 lpfc_printf_log(phba, KERN_ERR,
13893 LOG_TRACE_EVENT,
13894 "0349 rc should be "
13895 "MBX_SUCCESS\n");
13896 }
13897
13898 spin_lock_irqsave(&phba->hbalock, iflag);
13899 phba->work_ha |= work_ha_copy;
13900 spin_unlock_irqrestore(&phba->hbalock, iflag);
13901 lpfc_worker_wake_up(phba);
13902 }
13903 return IRQ_HANDLED;
13904unplug_error:
13905 spin_unlock_irqrestore(&phba->hbalock, iflag);
13906 return IRQ_HANDLED;
13907
13908} /* lpfc_sli_sp_intr_handler */
13909
13910/**
13911 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for SLI-3 device.
13912 * @irq: Interrupt number.
13913 * @dev_id: The device context pointer.
13914 *
13915 * This function is directly called from the PCI layer as an interrupt
13916 * service routine when a device with the SLI-3 interface spec is enabled in
13917 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13918 * ring event in the HBA. However, when the device is enabled with either
13919 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13920 * device-level interrupt handler. When the PCI slot is in error recovery
13921 * or the HBA is undergoing initialization, the interrupt handler will not
13922 * process the interrupt. The SCSI FCP fast-path ring events are handled in
13923 * the interrupt context. This function is called without any lock held.
13924 * It gets the hbalock to access and update SLI data structures.
13925 *
13926 * This function returns IRQ_HANDLED when interrupt is handled else it
13927 * returns IRQ_NONE.
13928 **/
13929irqreturn_t
13930lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13931{
13932 struct lpfc_hba *phba;
13933 uint32_t ha_copy;
13934 unsigned long status;
13935 unsigned long iflag;
13936 struct lpfc_sli_ring *pring;
13937
13938 /* Get the driver's phba structure from the dev_id and
13939 * assume the HBA is not interrupting.
13940 */
13941 phba = (struct lpfc_hba *) dev_id;
13942
13943 if (unlikely(!phba))
13944 return IRQ_NONE;
13945
13946 /*
13947 * Extra attention is needed when this function is invoked as an
13948 * individual interrupt handler in MSI-X multi-message interrupt mode.
13949 */
13950 if (phba->intr_type == MSIX) {
13951 /* Check device state for handling interrupt */
13952 if (lpfc_intr_state_check(phba))
13953 return IRQ_NONE;
13954 /* Need to read HA REG for FCP ring and other ring events */
13955 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13956 return IRQ_HANDLED;
13957
13958 /*
13959 * If there is deferred error attention, do not check for
13960 * any interrupt.
13961 */
13962 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13963 return IRQ_NONE;
13964
13965 /* Clear only attention sources related to the fast path */
13966 spin_lock_irqsave(&phba->hbalock, iflag);
13967 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13968 phba->HAregaddr);
13969 readl(phba->HAregaddr); /* flush */
13970 spin_unlock_irqrestore(&phba->hbalock, iflag);
13971 } else
13972 ha_copy = phba->ha_copy;
13973
13974 /*
13975 * Process all events on FCP ring. Take the optimized path for FCP IO.
13976 */
13977 ha_copy &= ~(phba->work_ha_mask);
13978
13979 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13980 status >>= (4*LPFC_FCP_RING);
13981 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13982 if (status & HA_RXMASK)
13983 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13984
13985 if (phba->cfg_multi_ring_support == 2) {
13986 /*
13987 * Process all events on extra ring. Take the optimized path
13988 * for extra ring IO.
13989 */
13990 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13991 status >>= (4*LPFC_EXTRA_RING);
13992 if (status & HA_RXMASK) {
13993 lpfc_sli_handle_fast_ring_event(phba,
13994 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13995 status);
13996 }
13997 }
13998 return IRQ_HANDLED;
13999} /* lpfc_sli_fp_intr_handler */
14000
14001/**
14002 * lpfc_sli_intr_handler - Device-level interrupt handler for SLI-3 device
14003 * @irq: Interrupt number.
14004 * @dev_id: The device context pointer.
14005 *
14006 * This function is the HBA device-level interrupt handler for a device with
14007 * the SLI-3 interface spec, called from the PCI layer when either MSI or
14008 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14009 * requires driver attention. This function invokes the slow-path interrupt
14010 * attention handling function and fast-path interrupt attention handling
14011 * function in turn to process the relevant HBA attention events. This
14012 * function is called without any lock held. It gets the hbalock to access
14013 * and update SLI data structures.
14014 *
14015 * This function returns IRQ_HANDLED when interrupt is handled, else it
14016 * returns IRQ_NONE.
14017 **/
14018irqreturn_t
14019lpfc_sli_intr_handler(int irq, void *dev_id)
14020{
14021 struct lpfc_hba *phba;
14022 irqreturn_t sp_irq_rc, fp_irq_rc;
14023 unsigned long status1, status2;
14024 uint32_t hc_copy;
14025
14026 /*
14027 * Get the driver's phba structure from the dev_id and
14028 * assume the HBA is not interrupting.
14029 */
14030 phba = (struct lpfc_hba *) dev_id;
14031
14032 if (unlikely(!phba))
14033 return IRQ_NONE;
14034
14035 /* Check device state for handling interrupt */
14036 if (lpfc_intr_state_check(phba))
14037 return IRQ_NONE;
14038
14039 spin_lock(&phba->hbalock);
14040 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14041 spin_unlock(&phba->hbalock);
14042 return IRQ_HANDLED;
14043 }
14044
14045 if (unlikely(!phba->ha_copy)) {
14046 spin_unlock(&phba->hbalock);
14047 return IRQ_NONE;
14048 } else if (phba->ha_copy & HA_ERATT) {
14049 if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
14050 /* ERATT polling has handled ERATT */
14051 phba->ha_copy &= ~HA_ERATT;
14052 }
14053
14054 /*
14055 * If there is deferred error attention, do not check for any interrupt.
14056 */
14057 if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
14058 spin_unlock(&phba->hbalock);
14059 return IRQ_NONE;
14060 }
14061
14062 /* Clear attention sources except link and error attentions */
14063 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14064 spin_unlock(&phba->hbalock);
14065 return IRQ_HANDLED;
14066 }
14067 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14068 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14069 phba->HCregaddr);
14070 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14071 writel(hc_copy, phba->HCregaddr);
14072 readl(phba->HAregaddr); /* flush */
14073 spin_unlock(&phba->hbalock);
14074
14075 /*
14076 * Invoke slow-path host attention interrupt handling as appropriate.
14077 */
14078
14079 /* status of events with mailbox and link attention */
14080 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14081
14082 /* status of events with ELS ring */
14083 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14084 status2 >>= (4*LPFC_ELS_RING);
14085
14086 if (status1 || (status2 & HA_RXMASK))
14087 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14088 else
14089 sp_irq_rc = IRQ_NONE;
14090
14091 /*
14092 * Invoke fast-path host attention interrupt handling as appropriate.
14093 */
14094
14095 /* status of events with FCP ring */
14096 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14097 status1 >>= (4*LPFC_FCP_RING);
14098
14099 /* status of events with extra ring */
14100 if (phba->cfg_multi_ring_support == 2) {
14101 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14102 status2 >>= (4*LPFC_EXTRA_RING);
14103 } else
14104 status2 = 0;
14105
14106 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14107 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14108 else
14109 fp_irq_rc = IRQ_NONE;
14110
14111 /* Return device-level interrupt handling status */
14112 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14113} /* lpfc_sli_intr_handler */
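
/*
 * Note on the ring-attention arithmetic used by the three SLI-3 handlers
 * above: the Host Attention register dedicates a 4-bit field to each ring,
 * so ring N's receive-attention bits are isolated with
 *
 *	status = ha_copy & (HA_RXMASK << (4 * N));
 *	status >>= (4 * N);
 *
 * after which (status & HA_RXMASK) is nonzero exactly when ring N has work
 * pending. Assuming LPFC_ELS_RING is ring 2, for example, the mask selects
 * bits 11:8 of the HA value.
 */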
14114
14115/**
14116 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14117 * @phba: pointer to lpfc hba data structure.
14118 *
14119 * This routine is invoked by the worker thread to process all the pending
14120 * SLI4 els abort xri events.
14121 **/
14122void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14123{
14124 struct lpfc_cq_event *cq_event;
14125 unsigned long iflags;
14126
14127 /* First, declare the els xri abort event has been handled */
14128 clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14129
14130 /* Now, handle all the els xri abort events */
14131 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14132 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14133 /* Get the first event from the head of the event queue */
14134 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14135 cq_event, struct lpfc_cq_event, list);
14136 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14137 iflags);
14138 /* Notify aborted XRI for ELS work queue */
14139 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14140
14141 /* Free the event processed back to the free pool */
14142 lpfc_sli4_cq_event_release(phba, cq_event);
14143 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14144 iflags);
14145 }
14146 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14147}
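
/*
 * The loop above follows the common "drain under lock, process unlocked"
 * shape: remove one node while holding the list lock, drop the lock for the
 * potentially slow handler, then retake it to test for more work. A generic
 * sketch of the same pattern, with placeholder names ("queue", "item_type",
 * "process") that are not driver symbols:
 *
 *	spin_lock_irqsave(&lock, flags);
 *	while (!list_empty(&queue)) {
 *		list_remove_head(&queue, item, struct item_type, list);
 *		spin_unlock_irqrestore(&lock, flags);
 *		process(item);
 *		spin_lock_irqsave(&lock, flags);
 *	}
 *	spin_unlock_irqrestore(&lock, flags);
 */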
14148
14149/**
14150 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14151 * @phba: Pointer to HBA context object.
14152 * @irspiocbq: Pointer to the response IOCBQ to populate from the ELS WCQE.
14153 *
14154 * This routine handles an ELS work-queue completion event and constructs
14155 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14156 * discovery engine to handle.
14157 *
14158 * Return: Pointer to the receive IOCBQ, NULL otherwise.
14159 **/
14160static struct lpfc_iocbq *
14161lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14162 struct lpfc_iocbq *irspiocbq)
14163{
14164 struct lpfc_sli_ring *pring;
14165 struct lpfc_iocbq *cmdiocbq;
14166 struct lpfc_wcqe_complete *wcqe;
14167 unsigned long iflags;
14168
14169 pring = lpfc_phba_elsring(phba);
14170 if (unlikely(!pring))
14171 return NULL;
14172
14173 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14174 spin_lock_irqsave(&pring->ring_lock, iflags);
14175 pring->stats.iocb_event++;
14176 /* Look up the ELS command IOCB and create pseudo response IOCB */
14177 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14178 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14179 if (unlikely(!cmdiocbq)) {
14180 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14181 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14182 "0386 ELS complete with no corresponding "
14183 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14184 wcqe->word0, wcqe->total_data_placed,
14185 wcqe->parameter, wcqe->word3);
14186 lpfc_sli_release_iocbq(phba, irspiocbq);
14187 return NULL;
14188 }
14189
14190 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14191 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14192
14193 /* Put the iocb back on the txcmplq */
14194 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14195 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14196
14197 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14198 spin_lock_irqsave(&phba->hbalock, iflags);
14199 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14200 spin_unlock_irqrestore(&phba->hbalock, iflags);
14201 }
14202
14203 return irspiocbq;
14204}
14205
14206inline struct lpfc_cq_event *
14207lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14208{
14209 struct lpfc_cq_event *cq_event;
14210
14211 /* Allocate a new internal CQ_EVENT entry */
14212 cq_event = lpfc_sli4_cq_event_alloc(phba);
14213 if (!cq_event) {
14214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14215 "0602 Failed to alloc CQ_EVENT entry\n");
14216 return NULL;
14217 }
14218
14219 /* Move the CQE into the event */
14220 memcpy(&cq_event->cqe, entry, size);
14221 return cq_event;
14222}
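
/*
 * Illustrative call shape (it mirrors the async-event handler below): the
 * caller passes the entry size so the memcpy above fits within the cqe
 * union embedded in struct lpfc_cq_event.
 *
 *	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
 *	if (!cq_event)
 *		return false;
 */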
14223
14224/**
14225 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14226 * @phba: Pointer to HBA context object.
14227 * @mcqe: Pointer to mailbox completion queue entry.
14228 *
14229 * This routine processes a mailbox completion queue entry carrying an
14230 * asynchronous event.
14231 *
14232 * Return: true if work posted to worker thread, otherwise false.
14233 **/
14234static bool
14235lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14236{
14237 struct lpfc_cq_event *cq_event;
14238 unsigned long iflags;
14239
14240 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14241 "0392 Async Event: word0:x%x, word1:x%x, "
14242 "word2:x%x, word3:x%x\n", mcqe->word0,
14243 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14244
14245 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14246 if (!cq_event)
14247 return false;
14248
14249 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14250 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14251 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14252
14253 /* Set the async event flag */
14254 set_bit(ASYNC_EVENT, &phba->hba_flag);
14255
14256 return true;
14257}
14258
14259/**
14260 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14261 * @phba: Pointer to HBA context object.
14262 * @mcqe: Pointer to mailbox completion queue entry.
14263 *
14264 * This routine processes a mailbox completion queue entry carrying a
14265 * mailbox completion event.
14266 *
14267 * Return: true if work posted to worker thread, otherwise false.
14268 **/
14269static bool
14270lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14271{
14272 uint32_t mcqe_status;
14273 MAILBOX_t *mbox, *pmbox;
14274 struct lpfc_mqe *mqe;
14275 struct lpfc_vport *vport;
14276 struct lpfc_nodelist *ndlp;
14277 struct lpfc_dmabuf *mp;
14278 unsigned long iflags;
14279 LPFC_MBOXQ_t *pmb;
14280 bool workposted = false;
14281 int rc;
14282
14283 /* Not a mailbox-complete MCQE; bail out after handling the consumed flag */
14284 if (!bf_get(lpfc_trailer_completed, mcqe))
14285 goto out_no_mqe_complete;
14286
14287 /* Get the reference to the active mbox command */
14288 spin_lock_irqsave(&phba->hbalock, iflags);
14289 pmb = phba->sli.mbox_active;
14290 if (unlikely(!pmb)) {
14291 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14292 "1832 No pending MBOX command to handle\n");
14293 spin_unlock_irqrestore(&phba->hbalock, iflags);
14294 goto out_no_mqe_complete;
14295 }
14296 spin_unlock_irqrestore(&phba->hbalock, iflags);
14297 mqe = &pmb->u.mqe;
14298 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14299 mbox = phba->mbox;
14300 vport = pmb->vport;
14301
14302 /* Reset heartbeat timer */
14303 phba->last_completion_time = jiffies;
14304 del_timer(&phba->sli.mbox_tmo);
14305
14306 /* Move mbox data to caller's mailbox region, do endian swapping */
14307 if (pmb->mbox_cmpl && mbox)
14308 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14309
14310 /*
14311 * For mcqe errors, conditionally move a modified error code to
14312 * the mbox so that the error will not be missed.
14313 */
14314 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14315 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14316 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14317 bf_set(lpfc_mqe_status, mqe,
14318 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14319 }
14320 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14321 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14322 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14323 "MBOX dflt rpi: status:x%x rpi:x%x",
14324 mcqe_status,
14325 pmbox->un.varWords[0], 0);
14326 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14327 mp = pmb->ctx_buf;
14328 ndlp = pmb->ctx_ndlp;
14329
14330 /* Reg_LOGIN of dflt RPI was successful. Mark the
14331 * node as having an UNREG_LOGIN in progress to stop
14332 * an unsolicited PLOGI from the same NPortId from
14333 * starting another mailbox transaction.
14334 */
14335 spin_lock_irqsave(&ndlp->lock, iflags);
14336 ndlp->nlp_flag |= NLP_UNREG_INP;
14337 spin_unlock_irqrestore(&ndlp->lock, iflags);
14338 lpfc_unreg_login(phba, vport->vpi,
14339 pmbox->un.varWords[0], pmb);
14340 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14341 pmb->ctx_buf = mp;
14342
14343 /* No reference taken here. This is a default
14344 * RPI reg/immediate unreg cycle. The reference was
14345 * taken in the reg rpi path and is released when
14346 * this mailbox completes.
14347 */
14348 pmb->ctx_ndlp = ndlp;
14349 pmb->vport = vport;
14350 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14351 if (rc != MBX_BUSY)
14352 lpfc_printf_log(phba, KERN_ERR,
14353 LOG_TRACE_EVENT,
14354 "0385 rc should "
14355 "have been MBX_BUSY\n");
14356 if (rc != MBX_NOT_FINISHED)
14357 goto send_current_mbox;
14358 }
14359 }
14360 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14361 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14362 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14363
14364 /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14365 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14366 spin_lock_irqsave(&phba->hbalock, iflags);
14367 /* Release the mailbox command posting token */
14368 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14369 phba->sli.mbox_active = NULL;
14370 if (bf_get(lpfc_trailer_consumed, mcqe))
14371 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14372 spin_unlock_irqrestore(&phba->hbalock, iflags);
14373
14374 /* Post the next mbox command, if there is one */
14375 lpfc_sli4_post_async_mbox(phba);
14376
14377 /* Process cmpl now */
14378 if (pmb->mbox_cmpl)
14379 pmb->mbox_cmpl(phba, pmb);
14380 return false;
14381 }
14382
14383 /* There is mailbox completion work to queue to the worker thread */
14384 spin_lock_irqsave(&phba->hbalock, iflags);
14385 __lpfc_mbox_cmpl_put(phba, pmb);
14386 phba->work_ha |= HA_MBATT;
14387 spin_unlock_irqrestore(&phba->hbalock, iflags);
14388 workposted = true;
14389
14390send_current_mbox:
14391 spin_lock_irqsave(&phba->hbalock, iflags);
14392 /* Release the mailbox command posting token */
14393 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14394 /* Clearing the active mailbox pointer must be in sync with the flag clear */
14395 phba->sli.mbox_active = NULL;
14396 if (bf_get(lpfc_trailer_consumed, mcqe))
14397 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14398 spin_unlock_irqrestore(&phba->hbalock, iflags);
14399 /* Wake up worker thread to post the next pending mailbox command */
14400 lpfc_worker_wake_up(phba);
14401 return workposted;
14402
14403out_no_mqe_complete:
14404 spin_lock_irqsave(&phba->hbalock, iflags);
14405 if (bf_get(lpfc_trailer_consumed, mcqe))
14406 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14407 spin_unlock_irqrestore(&phba->hbalock, iflags);
14408 return false;
14409}
14410
14411/**
14412 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14413 * @phba: Pointer to HBA context object.
14414 * @cq: Pointer to associated CQ
14415 * @cqe: Pointer to mailbox completion queue entry.
14416 *
14417 * This routine processes a mailbox completion queue entry; it invokes the
14418 * proper mailbox complete handling or asynchronous event handling routine
14419 * according to the MCQE's async bit.
14420 *
14421 * Return: true if work posted to worker thread, otherwise false.
14422 **/
14423static bool
14424lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14425 struct lpfc_cqe *cqe)
14426{
14427 struct lpfc_mcqe mcqe;
14428 bool workposted;
14429
14430 cq->CQ_mbox++;
14431
14432 /* Copy the mailbox MCQE and convert endian order as needed */
14433 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14434
14435 /* Invoke the proper event handling routine */
14436 if (!bf_get(lpfc_trailer_async, &mcqe))
14437 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14438 else
14439 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14440 return workposted;
14441}
14442
14443/**
14444 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14445 * @phba: Pointer to HBA context object.
14446 * @cq: Pointer to associated CQ
14447 * @wcqe: Pointer to work-queue completion queue entry.
14448 *
14449 * This routine handles an ELS work-queue completion event.
14450 *
14451 * Return: true if work posted to worker thread, otherwise false.
14452 **/
14453static bool
14454lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14455 struct lpfc_wcqe_complete *wcqe)
14456{
14457 struct lpfc_iocbq *irspiocbq;
14458 unsigned long iflags;
14459 struct lpfc_sli_ring *pring = cq->pring;
14460 int txq_cnt = 0;
14461 int txcmplq_cnt = 0;
14462
14463 /* Check for response status */
14464 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14465 /* Log the error status */
14466 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14467 "0357 ELS CQE error: status=x%x: "
14468 "CQE: %08x %08x %08x %08x\n",
14469 bf_get(lpfc_wcqe_c_status, wcqe),
14470 wcqe->word0, wcqe->total_data_placed,
14471 wcqe->parameter, wcqe->word3);
14472 }
14473
14474 /* Get an irspiocbq for later ELS response processing use */
14475 irspiocbq = lpfc_sli_get_iocbq(phba);
14476 if (!irspiocbq) {
14477 if (!list_empty(&pring->txq))
14478 txq_cnt++;
14479 if (!list_empty(&pring->txcmplq))
14480 txcmplq_cnt++;
14481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14482 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14483 "els_txcmplq_cnt=%d\n",
14484 txq_cnt, phba->iocb_cnt,
14485 txcmplq_cnt);
14486 return false;
14487 }
14488
14489 /* Save off the slow-path queue event for work thread to process */
14490 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14491 spin_lock_irqsave(&phba->hbalock, iflags);
14492 list_add_tail(&irspiocbq->cq_event.list,
14493 &phba->sli4_hba.sp_queue_event);
14494 spin_unlock_irqrestore(&phba->hbalock, iflags);
14495 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14496
14497 return true;
14498}
14499
14500/**
14501 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14502 * @phba: Pointer to HBA context object.
14503 * @wcqe: Pointer to work-queue completion queue entry.
14504 *
14505 * This routine handles a slow-path WQ entry consumed event by invoking the
14506 * proper WQ release routine on the slow-path WQ.
14507 **/
14508static void
14509lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14510 struct lpfc_wcqe_release *wcqe)
14511{
14512 /* sanity check on queue memory */
14513 if (unlikely(!phba->sli4_hba.els_wq))
14514 return;
14515 /* Check for the slow-path ELS work queue */
14516 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14517 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14518 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14519 else
14520 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14521 "2579 Slow-path wqe consume event carries "
14522 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14523 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14524 phba->sli4_hba.els_wq->queue_id);
14525}
14526
14527/**
14528 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14529 * @phba: Pointer to HBA context object.
14530 * @cq: Pointer to a WQ completion queue.
14531 * @wcqe: Pointer to work-queue completion queue entry.
14532 *
14533 * This routine handles an XRI abort event.
14534 *
14535 * Return: true if work posted to worker thread, otherwise false.
14536 **/
14537static bool
14538lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14539 struct lpfc_queue *cq,
14540 struct sli4_wcqe_xri_aborted *wcqe)
14541{
14542 bool workposted = false;
14543 struct lpfc_cq_event *cq_event;
14544 unsigned long iflags;
14545
14546 switch (cq->subtype) {
14547 case LPFC_IO:
14548 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14549 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14550 /* Notify aborted XRI for NVME work queue */
14551 if (phba->nvmet_support)
14552 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14553 }
14554 workposted = false;
14555 break;
14556 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14557 case LPFC_ELS:
14558 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14559 if (!cq_event) {
14560 workposted = false;
14561 break;
14562 }
14563 cq_event->hdwq = cq->hdwq;
14564 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14565 iflags);
14566 list_add_tail(&cq_event->list,
14567 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14568 /* Set the els xri abort event flag */
14569 set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14570 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14571 iflags);
14572 workposted = true;
14573 break;
14574 default:
14575 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14576 "0603 Invalid CQ subtype %d: "
14577 "%08x %08x %08x %08x\n",
14578 cq->subtype, wcqe->word0, wcqe->parameter,
14579 wcqe->word2, wcqe->word3);
14580 workposted = false;
14581 break;
14582 }
14583 return workposted;
14584}
14585
14586#define FC_RCTL_MDS_DIAGS 0xF4
14587
14588/**
14589 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14590 * @phba: Pointer to HBA context object.
14591 * @rcqe: Pointer to receive-queue completion queue entry.
14592 *
14593 * This routine processes a receive-queue completion queue entry.
14594 *
14595 * Return: true if work posted to worker thread, otherwise false.
14596 **/
14597static bool
14598lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14599{
14600 bool workposted = false;
14601 struct fc_frame_header *fc_hdr;
14602 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14603 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14604 struct lpfc_nvmet_tgtport *tgtp;
14605 struct hbq_dmabuf *dma_buf;
14606 uint32_t status, rq_id;
14607 unsigned long iflags;
14608
14609 /* sanity check on queue memory */
14610 if (unlikely(!hrq) || unlikely(!drq))
14611 return workposted;
14612
14613 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14614 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14615 else
14616 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14617 if (rq_id != hrq->queue_id)
14618 goto out;
14619
14620 status = bf_get(lpfc_rcqe_status, rcqe);
14621 switch (status) {
14622 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14623 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14624 "2537 Receive Frame Truncated!!\n");
14625 fallthrough;
14626 case FC_STATUS_RQ_SUCCESS:
14627 spin_lock_irqsave(&phba->hbalock, iflags);
14628 lpfc_sli4_rq_release(hrq, drq);
14629 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14630 if (!dma_buf) {
14631 hrq->RQ_no_buf_found++;
14632 spin_unlock_irqrestore(&phba->hbalock, iflags);
14633 goto out;
14634 }
14635 hrq->RQ_rcv_buf++;
14636 hrq->RQ_buf_posted--;
14637 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14638
14639 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14640
14641 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14642 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14643 spin_unlock_irqrestore(&phba->hbalock, iflags);
14644 /* Handle MDS Loopback frames */
14645 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
14646 lpfc_sli4_handle_mds_loopback(phba->pport,
14647 dma_buf);
14648 else
14649 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14650 break;
14651 }
14652
14653 /* save off the frame for the work thread to process */
14654 list_add_tail(&dma_buf->cq_event.list,
14655 &phba->sli4_hba.sp_queue_event);
14656 spin_unlock_irqrestore(&phba->hbalock, iflags);
14657 /* Frame received */
14658 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14659 workposted = true;
14660 break;
14661 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14662 if (phba->nvmet_support) {
14663 tgtp = phba->targetport->private;
14664 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14665 "6402 RQE Error x%x, posted %d err_cnt "
14666 "%d: %x %x %x\n",
14667 status, hrq->RQ_buf_posted,
14668 hrq->RQ_no_posted_buf,
14669 atomic_read(&tgtp->rcv_fcp_cmd_in),
14670 atomic_read(&tgtp->rcv_fcp_cmd_out),
14671 atomic_read(&tgtp->xmt_fcp_release));
14672 }
14673 fallthrough;
14674
14675 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14676 hrq->RQ_no_posted_buf++;
14677 /* Post more buffers if possible */
14678 set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
14679 workposted = true;
14680 break;
14681 case FC_STATUS_RQ_DMA_FAILURE:
14682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14683 "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
14684 "x%08x\n",
14685 status, rcqe->word0, rcqe->word1,
14686 rcqe->word2, rcqe->word3);
14687
14688 /* If IV set, no further recovery */
14689 if (bf_get(lpfc_rcqe_iv, rcqe))
14690 break;
14691
14692 /* recycle consumed resource */
14693 spin_lock_irqsave(&phba->hbalock, iflags);
14694 lpfc_sli4_rq_release(hrq, drq);
14695 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14696 if (!dma_buf) {
14697 hrq->RQ_no_buf_found++;
14698 spin_unlock_irqrestore(&phba->hbalock, iflags);
14699 break;
14700 }
14701 hrq->RQ_rcv_buf++;
14702 hrq->RQ_buf_posted--;
14703 spin_unlock_irqrestore(&phba->hbalock, iflags);
14704 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14705 break;
14706 default:
14707 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14708 "2565 Unexpected RQE Status x%x, w0-3 x%08x "
14709 "x%08x x%08x x%08x\n",
14710 status, rcqe->word0, rcqe->word1,
14711 rcqe->word2, rcqe->word3);
14712 break;
14713 }
14714out:
14715 return workposted;
14716}
14717
14718/**
14719 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14720 * @phba: Pointer to HBA context object.
14721 * @cq: Pointer to the completion queue.
14722 * @cqe: Pointer to a completion queue entry.
14723 *
14724 * This routine processes a slow-path work-queue or receive-queue completion
14725 * queue entry.
14726 *
14727 * Return: true if work posted to worker thread, otherwise false.
14728 **/
14729static bool
14730lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14731 struct lpfc_cqe *cqe)
14732{
14733 struct lpfc_cqe cqevt;
14734 bool workposted = false;
14735
14736 /* Copy the work queue CQE and convert endian order if needed */
14737 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14738
14739 /* Check and process the different types of WCQEs and dispatch */
14740 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14741 case CQE_CODE_COMPL_WQE:
14742 /* Process the WQ/RQ complete event */
14743 phba->last_completion_time = jiffies;
14744 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14745 (struct lpfc_wcqe_complete *)&cqevt);
14746 break;
14747 case CQE_CODE_RELEASE_WQE:
14748 /* Process the WQ release event */
14749 lpfc_sli4_sp_handle_rel_wcqe(phba,
14750 (struct lpfc_wcqe_release *)&cqevt);
14751 break;
14752 case CQE_CODE_XRI_ABORTED:
14753 /* Process the WQ XRI abort event */
14754 phba->last_completion_time = jiffies;
14755 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14756 (struct sli4_wcqe_xri_aborted *)&cqevt);
14757 break;
14758 case CQE_CODE_RECEIVE:
14759 case CQE_CODE_RECEIVE_V1:
14760 /* Process the RQ event */
14761 phba->last_completion_time = jiffies;
14762 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14763 (struct lpfc_rcqe *)&cqevt);
14764 break;
14765 default:
14766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14767 "0388 Not a valid WCQE code: x%x\n",
14768 bf_get(lpfc_cqe_code, &cqevt));
14769 break;
14770 }
14771 return workposted;
14772}
14773
14774/**
14775 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14776 * @phba: Pointer to HBA context object.
14777 * @eqe: Pointer to slow-path event queue entry.
14778 * @speq: Pointer to slow-path event queue.
14779 *
14780 * This routine processes an event queue entry from the slow-path event queue.
14781 * It checks the MajorCode and MinorCode to determine whether this is a
14782 * completion event on a completion queue; if not, an error is logged and the
14783 * routine returns. Otherwise, it finds the corresponding completion queue,
14784 * processes all the entries on that completion queue, rearms the completion
14785 * queue, and then returns.
14786 *
14787 **/
14788static void
14789lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14790 struct lpfc_queue *speq)
14791{
14792 struct lpfc_queue *cq = NULL, *childq;
14793 uint16_t cqid;
14794 int ret = 0;
14795
14796 /* Get the reference to the corresponding CQ */
14797 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14798
14799 list_for_each_entry(childq, &speq->child_list, list) {
14800 if (childq->queue_id == cqid) {
14801 cq = childq;
14802 break;
14803 }
14804 }
14805 if (unlikely(!cq)) {
14806 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14807 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14808 "0365 Slow-path CQ identifier "
14809 "(%d) does not exist\n", cqid);
14810 return;
14811 }
14812
14813 /* Save EQ associated with this CQ */
14814 cq->assoc_qp = speq;
14815
14816 if (is_kdump_kernel())
14817 ret = queue_work(phba->wq, &cq->spwork);
14818 else
14819 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14820
14821 if (!ret)
14822 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14823 "0390 Cannot schedule queue work "
14824 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14825 cqid, cq->queue_id, raw_smp_processor_id());
14826}
14827
14828/**
14829 * __lpfc_sli4_process_cq - Process elements of a CQ
14830 * @phba: Pointer to HBA context object.
14831 * @cq: Pointer to CQ to be processed
14832 * @handler: Routine to process each cqe
14833 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14834 *
14835 * This routine processes completion queue entries in a CQ. While a valid
14836 * queue element is found, the handler is called. During processing checks
14837 * are made for periodic doorbell writes to let the hardware know of
14838 * element consumption.
14839 *
14840 * If the max limit on cqes to process is hit, or there are no more valid
14841 * entries, the loop stops. If we processed a sufficient number of elements,
14842 * meaning there is sufficient load, rather than rearming and generating
14843 * another interrupt, a cq rescheduling delay will be set. A delay of 0
14844 * indicates no rescheduling.
14845 *
14846 * Returns true if work was posted to the worker thread, otherwise false.
14847 **/
14848static bool
14849__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14850 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14851 struct lpfc_cqe *), unsigned long *delay)
14852{
14853 struct lpfc_cqe *cqe;
14854 bool workposted = false;
14855 int count = 0, consumed = 0;
14856 bool arm = true;
14857
14858 /* default - no reschedule */
14859 *delay = 0;
14860
14861 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14862 goto rearm_and_exit;
14863
14864 /* Process all the entries to the CQ */
14865 cq->q_flag = 0;
14866 cqe = lpfc_sli4_cq_get(cq);
14867 while (cqe) {
14868 workposted |= handler(phba, cq, cqe);
14869 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14870
14871 consumed++;
14872 if (!(++count % cq->max_proc_limit))
14873 break;
14874
14875 if (!(count % cq->notify_interval)) {
14876 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14877 LPFC_QUEUE_NOARM);
14878 consumed = 0;
14879 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14880 }
14881
14882 if (count == LPFC_NVMET_CQ_NOTIFY)
14883 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14884
14885 cqe = lpfc_sli4_cq_get(cq);
14886 }
14887 if (count >= phba->cfg_cq_poll_threshold) {
14888 *delay = 1;
14889 arm = false;
14890 }
14891
14892 /* Track the max number of CQEs processed in 1 EQ */
14893 if (count > cq->CQ_max_cqe)
14894 cq->CQ_max_cqe = count;
14895
14896 cq->assoc_qp->EQ_cqe_cnt += count;
14897
14898 /* Catch the no cq entry condition */
14899 if (unlikely(count == 0))
14900 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14901 "0369 No entry from completion queue "
14902 "qid=%d\n", cq->queue_id);
14903
14904 xchg(&cq->queue_claimed, 0);
14905
14906rearm_and_exit:
14907 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14908 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14909
14910 return workposted;
14911}
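
/*
 * Reduced sketch of the loop above (the max_proc_limit cap and statistics
 * are elided). The cmpxchg() claim guarantees a single processing context
 * per CQ; intermediate doorbell writes use NOARM so the hardware can
 * recycle consumed entries without raising a new interrupt, and the final
 * write either rearms the CQ (light load) or leaves it unarmed with *delay
 * set so the caller polls again shortly (heavy load):
 *
 *	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
 *		goto rearm_and_exit;
 *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL) {
 *		workposted |= handler(phba, cq, cqe);
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		consumed++;
 *		if (!(++count % cq->notify_interval)) {
 *			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 *							LPFC_QUEUE_NOARM);
 *			consumed = 0;
 *		}
 *	}
 *	xchg(&cq->queue_claimed, 0);
 */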
14912
14913/**
14914 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14915 * @cq: pointer to CQ to process
14916 *
14917 * This routine calls the cq processing routine with a handler specific
14918 * to the type of queue bound to it.
14919 *
14920 * The CQ routine returns two values: the first is the calling status,
14921 * which indicates whether work was queued to the background discovery
14922 * thread. If true, the routine should wake up the discovery thread;
14923 * the second is the delay parameter. If non-zero, rather than rearming
14924 * the CQ and taking yet another interrupt, the CQ handler should be queued so
14925 * that it is processed in a subsequent polling action. The value of
14926 * the delay indicates when to reschedule it.
14927 **/
14928static void
14929__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14930{
14931 struct lpfc_hba *phba = cq->phba;
14932 unsigned long delay;
14933 bool workposted = false;
14934 int ret = 0;
14935
14936 /* Process and rearm the CQ */
14937 switch (cq->type) {
14938 case LPFC_MCQ:
14939 workposted |= __lpfc_sli4_process_cq(phba, cq,
14940 lpfc_sli4_sp_handle_mcqe,
14941 &delay);
14942 break;
14943 case LPFC_WCQ:
14944 if (cq->subtype == LPFC_IO)
14945 workposted |= __lpfc_sli4_process_cq(phba, cq,
14946 lpfc_sli4_fp_handle_cqe,
14947 &delay);
14948 else
14949 workposted |= __lpfc_sli4_process_cq(phba, cq,
14950 lpfc_sli4_sp_handle_cqe,
14951 &delay);
14952 break;
14953 default:
14954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14955 "0370 Invalid completion queue type (%d)\n",
14956 cq->type);
14957 return;
14958 }
14959
14960 if (delay) {
14961 if (is_kdump_kernel())
14962 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14963 delay);
14964 else
14965 ret = queue_delayed_work_on(cq->chann, phba->wq,
14966 &cq->sched_spwork, delay);
14967 if (!ret)
14968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14969 "0394 Cannot schedule queue work "
14970 "for cqid=%d on CPU %d\n",
14971 cq->queue_id, cq->chann);
14972 }
14973
14974 /* wake up worker thread if there are works to be done */
14975 if (workposted)
14976 lpfc_worker_wake_up(phba);
14977}
14978
14979/**
14980 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14981 * interrupt
14982 * @work: pointer to work element
14983 *
14984 * Translates from the work element to the CQ and calls the slow-path handler.
14985 **/
14986static void
14987lpfc_sli4_sp_process_cq(struct work_struct *work)
14988{
14989 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14990
14991 __lpfc_sli4_sp_process_cq(cq);
14992}
14993
14994/**
14995 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14996 * @work: pointer to work element
14997 *
14998 * Translates from the work element to the CQ and calls the slow-path handler.
14999 **/
15000static void
15001lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15002{
15003 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15004 struct lpfc_queue, sched_spwork);
15005
15006 __lpfc_sli4_sp_process_cq(cq);
15007}
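
/*
 * Both wrappers above recover the owning queue from its embedded work item
 * via container_of(). Generic shape of the pattern, with placeholder names
 * ("owner", "dwork", "owner_fn"):
 *
 *	struct owner {
 *		struct delayed_work dwork;
 *	};
 *
 *	static void owner_fn(struct work_struct *work)
 *	{
 *		struct owner *o = container_of(to_delayed_work(work),
 *					       struct owner, dwork);
 *	}
 *
 * to_delayed_work() maps the work_struct back to its delayed_work wrapper
 * first, and container_of() then maps that wrapper to the owner.
 */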
15008
15009/**
15010 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15011 * @phba: Pointer to HBA context object.
15012 * @cq: Pointer to associated CQ
15013 * @wcqe: Pointer to work-queue completion queue entry.
15014 *
15015 * This routine processes a fast-path work queue completion entry from the
15016 * fast-path event queue for FCP command response completion.
15017 **/
15018static void
15019lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15020 struct lpfc_wcqe_complete *wcqe)
15021{
15022 struct lpfc_sli_ring *pring = cq->pring;
15023 struct lpfc_iocbq *cmdiocbq;
15024 unsigned long iflags;
15025
15026 /* Check for response status */
15027 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15028 /* If resource errors reported from HBA, reduce queue
15029 * depth of the SCSI device.
15030 */
15031 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15032 IOSTAT_LOCAL_REJECT)) &&
15033 ((wcqe->parameter & IOERR_PARAM_MASK) ==
15034 IOERR_NO_RESOURCES))
15035 phba->lpfc_rampdown_queue_depth(phba);
15036
15037 /* Log the cmpl status */
15038 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15039 "0373 FCP CQE cmpl: status=x%x: "
15040 "CQE: %08x %08x %08x %08x\n",
15041 bf_get(lpfc_wcqe_c_status, wcqe),
15042 wcqe->word0, wcqe->total_data_placed,
15043 wcqe->parameter, wcqe->word3);
15044 }
15045
15046 /* Look up the FCP command IOCB and create pseudo response IOCB */
15047 spin_lock_irqsave(&pring->ring_lock, iflags);
15048 pring->stats.iocb_event++;
15049 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15050 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15051 spin_unlock_irqrestore(&pring->ring_lock, iflags);
15052 if (unlikely(!cmdiocbq)) {
15053 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15054 "0374 FCP complete with no corresponding "
15055 "cmdiocb: iotag (%d)\n",
15056 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15057 return;
15058 }
15059#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15060 cmdiocbq->isr_timestamp = cq->isr_timestamp;
15061#endif
15062 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15063 spin_lock_irqsave(&phba->hbalock, iflags);
15064 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15065 spin_unlock_irqrestore(&phba->hbalock, iflags);
15066 }
15067
15068 if (cmdiocbq->cmd_cmpl) {
15069 /* For FCP the flag is cleared in cmd_cmpl */
15070 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15071 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15072 spin_lock_irqsave(&phba->hbalock, iflags);
15073 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15074 spin_unlock_irqrestore(&phba->hbalock, iflags);
15075 }
15076
15077 /* Pass the cmd_iocb and the wcqe to the upper layer */
15078 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15079 sizeof(struct lpfc_wcqe_complete));
15080 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15081 } else {
15082 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15083 "0375 FCP cmdiocb not callback function "
15084 "iotag: (%d)\n",
15085 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15086 }
15087}
15088
15089/**
15090 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15091 * @phba: Pointer to HBA context object.
15092 * @cq: Pointer to completion queue.
15093 * @wcqe: Pointer to work-queue completion queue entry.
15094 *
15095 * This routine handles a fast-path WQ entry consumed event by invoking the
15096 * proper WQ release routine on the matching fast-path WQ.
15097 **/
15098static void
15099lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15100 struct lpfc_wcqe_release *wcqe)
15101{
15102 struct lpfc_queue *childwq;
15103 bool wqid_matched = false;
15104 uint16_t hba_wqid;
15105
15106 /* Check for fast-path FCP work queue release */
15107 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15108 list_for_each_entry(childwq, &cq->child_list, list) {
15109 if (childwq->queue_id == hba_wqid) {
15110 lpfc_sli4_wq_release(childwq,
15111 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15112 if (childwq->q_flag & HBA_NVMET_WQFULL)
15113 lpfc_nvmet_wqfull_process(phba, childwq);
15114 wqid_matched = true;
15115 break;
15116 }
15117 }
15118 /* Report warning log message if no match found */
15119 if (!wqid_matched)
15120 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15121 "2580 Fast-path wqe consume event carries "
15122 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15123}
15124
15125/**
15126 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15127 * @phba: Pointer to HBA context object.
15128 * @cq: Pointer to completion queue.
15129 * @rcqe: Pointer to receive-queue completion queue entry.
15130 *
15131 * This routine processes a receive-queue completion queue entry.
15132 *
15133 * Return: true if work posted to worker thread, otherwise false.
15134 **/
15135static bool
15136lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15137 struct lpfc_rcqe *rcqe)
15138{
15139 bool workposted = false;
15140 struct lpfc_queue *hrq;
15141 struct lpfc_queue *drq;
15142 struct rqb_dmabuf *dma_buf;
15143 struct fc_frame_header *fc_hdr;
15144 struct lpfc_nvmet_tgtport *tgtp;
15145 uint32_t status, rq_id;
15146 unsigned long iflags;
15147 uint32_t fctl, idx;
15148
15149 if ((phba->nvmet_support == 0) ||
15150 (phba->sli4_hba.nvmet_cqset == NULL))
15151 return workposted;
15152
15153 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15154 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15155 drq = phba->sli4_hba.nvmet_mrq_data[idx];
15156
15157 /* sanity check on queue memory */
15158 if (unlikely(!hrq) || unlikely(!drq))
15159 return workposted;
15160
15161 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15162 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15163 else
15164 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15165
15166 if ((phba->nvmet_support == 0) ||
15167 (rq_id != hrq->queue_id))
15168 return workposted;
15169
15170 status = bf_get(lpfc_rcqe_status, rcqe);
15171 switch (status) {
15172 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15174 "6126 Receive Frame Truncated!!\n");
15175 fallthrough;
15176 case FC_STATUS_RQ_SUCCESS:
15177 spin_lock_irqsave(&phba->hbalock, iflags);
15178 lpfc_sli4_rq_release(hrq, drq);
15179 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15180 if (!dma_buf) {
15181 hrq->RQ_no_buf_found++;
15182 spin_unlock_irqrestore(&phba->hbalock, iflags);
15183 goto out;
15184 }
15185 spin_unlock_irqrestore(&phba->hbalock, iflags);
15186 hrq->RQ_rcv_buf++;
15187 hrq->RQ_buf_posted--;
15188 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15189
15190 /* Just some basic sanity checks on FCP Command frame */
15191 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15192 fc_hdr->fh_f_ctl[1] << 8 |
15193 fc_hdr->fh_f_ctl[2]);
15194 if (((fctl &
15195 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15196 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15197 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15198 goto drop;
15199
15200 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15201 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15202 lpfc_nvmet_unsol_fcp_event(
15203 phba, idx, dma_buf, cq->isr_timestamp,
15204 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15205 return false;
15206 }
15207drop:
15208 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15209 break;
15210 case FC_STATUS_INSUFF_BUF_FRM_DISC:
15211 if (phba->nvmet_support) {
15212 tgtp = phba->targetport->private;
15213 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15214 "6401 RQE Error x%x, posted %d err_cnt "
15215 "%d: %x %x %x\n",
15216 status, hrq->RQ_buf_posted,
15217 hrq->RQ_no_posted_buf,
15218 atomic_read(&tgtp->rcv_fcp_cmd_in),
15219 atomic_read(&tgtp->rcv_fcp_cmd_out),
15220 atomic_read(&tgtp->xmt_fcp_release));
15221 }
15222 fallthrough;
15223
15224 case FC_STATUS_INSUFF_BUF_NEED_BUF:
15225 hrq->RQ_no_posted_buf++;
15226 /* Post more buffers if possible */
15227 break;
15228 case FC_STATUS_RQ_DMA_FAILURE:
15229 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15230 "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
15231 "x%08x\n",
15232 status, rcqe->word0, rcqe->word1,
15233 rcqe->word2, rcqe->word3);
15234
15235 /* If IV set, no further recovery */
15236 if (bf_get(lpfc_rcqe_iv, rcqe))
15237 break;
15238
15239 /* recycle consumed resource */
15240 spin_lock_irqsave(&phba->hbalock, iflags);
15241 lpfc_sli4_rq_release(hrq, drq);
15242 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15243 if (!dma_buf) {
15244 hrq->RQ_no_buf_found++;
15245 spin_unlock_irqrestore(&phba->hbalock, iflags);
15246 break;
15247 }
15248 hrq->RQ_rcv_buf++;
15249 hrq->RQ_buf_posted--;
15250 spin_unlock_irqrestore(&phba->hbalock, iflags);
15251 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15252 break;
15253 default:
15254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15255 "2576 Unexpected RQE Status x%x, w0-3 x%08x "
15256 "x%08x x%08x x%08x\n",
15257 status, rcqe->word0, rcqe->word1,
15258 rcqe->word2, rcqe->word3);
15259 break;
15260 }
15261out:
15262 return workposted;
15263}
15264
15265/**
15266 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15267 * @phba: adapter with cq
15268 * @cq: Pointer to the completion queue.
15269 * @cqe: Pointer to fast-path completion queue entry.
15270 *
15271 * This routine processes a fast-path work queue completion entry from a
15272 * fast-path event queue for FCP command response completion.
15273 *
15274 * Return: true if work posted to worker thread, otherwise false.
15275 **/
15276static bool
15277lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15278 struct lpfc_cqe *cqe)
15279{
15280 struct lpfc_wcqe_release wcqe;
15281 bool workposted = false;
15282
15283 /* Copy the work queue CQE and convert endian order if needed */
15284 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15285
15287 /* Check and process the different types of WCQEs and dispatch */
15287 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15288 case CQE_CODE_COMPL_WQE:
15289 case CQE_CODE_NVME_ERSP:
15290 cq->CQ_wq++;
15291 /* Process the WQ complete event */
15292 phba->last_completion_time = jiffies;
15293 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15294 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15295 (struct lpfc_wcqe_complete *)&wcqe);
15296 break;
15297 case CQE_CODE_RELEASE_WQE:
15298 cq->CQ_release_wqe++;
15299 /* Process the WQ release event */
15300 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15301 (struct lpfc_wcqe_release *)&wcqe);
15302 break;
15303 case CQE_CODE_XRI_ABORTED:
15304 cq->CQ_xri_aborted++;
15305 /* Process the WQ XRI abort event */
15306 phba->last_completion_time = jiffies;
15307 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15308 (struct sli4_wcqe_xri_aborted *)&wcqe);
15309 break;
15310 case CQE_CODE_RECEIVE_V1:
15311 case CQE_CODE_RECEIVE:
15312 phba->last_completion_time = jiffies;
15313 if (cq->subtype == LPFC_NVMET) {
15314 workposted = lpfc_sli4_nvmet_handle_rcqe(
15315 phba, cq, (struct lpfc_rcqe *)&wcqe);
15316 }
15317 break;
15318 default:
15319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15320 "0144 Not a valid CQE code: x%x\n",
15321 bf_get(lpfc_wcqe_c_code, &wcqe));
15322 break;
15323 }
15324 return workposted;
15325}
15326
15327/**
15328 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15329 * @cq: Pointer to CQ to be processed
15330 *
15331 * This routine calls the cq processing routine with the handler for
15332 * fast path CQEs.
15333 *
15334 * The CQ routine returns two values: the first is the calling status,
15335 * which indicates whether work was queued to the background discovery
15336 * thread. If true, the routine should wake up the discovery thread;
15337 * the second is the delay parameter. If non-zero, rather than rearming
15338 * the CQ and taking yet another interrupt, the CQ handler should be
15339 * requeued so that it is processed in a subsequent polling action. The
15340 * value of the delay indicates when to reschedule it.
15341 **/
15342static void
15343__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15344{
15345 struct lpfc_hba *phba = cq->phba;
15346 unsigned long delay;
15347 bool workposted = false;
15348 int ret;
15349
15350 /* process and rearm the CQ */
15351 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15352 &delay);
15353
15354 if (delay) {
15355 if (is_kdump_kernel())
15356 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15357 delay);
15358 else
15359 ret = queue_delayed_work_on(cq->chann, phba->wq,
15360 &cq->sched_irqwork, delay);
15361 if (!ret)
15362 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15363 "0367 Cannot schedule queue work "
15364 "for cqid=%d on CPU %d\n",
15365 cq->queue_id, cq->chann);
15366 }
15367
15368 /* wake up worker thread if there are works to be done */
15369 if (workposted)
15370 lpfc_worker_wake_up(phba);
15371}
15372
15373/**
15374 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15375 * interrupt
15376 * @work: pointer to work element
15377 *
15378 * Translates from the work element to its queue and calls the fast-path handler.
15379 **/
15380static void
15381lpfc_sli4_hba_process_cq(struct work_struct *work)
15382{
15383 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15384
15385 __lpfc_sli4_hba_process_cq(cq);
15386}
15387
15388/**
15389 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15390 * @phba: Pointer to HBA context object.
15391 * @eq: Pointer to the queue structure.
15392 * @eqe: Pointer to fast-path event queue entry.
15393 * @poll_mode: poll mode with which to process the cq.
15394 *
15395 * This routine processes an event queue entry from the fast-path event queue.
15396 * It checks the MajorCode and MinorCode to determine whether this is a
15397 * completion event on a completion queue; if not, an error is logged and
15398 * the routine returns. Otherwise, it gets the corresponding completion
15399 * queue, processes all the entries on that completion queue, rearms it,
15400 * and then returns.
15401 **/
15402static void
15403lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15404 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
15405{
15406 struct lpfc_queue *cq = NULL;
15407 uint32_t qidx = eq->hdwq;
15408 uint16_t cqid, id;
15409 int ret;
15410
15411 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15412 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15413 "0366 Not a valid completion "
15414 "event: majorcode=x%x, minorcode=x%x\n",
15415 bf_get_le32(lpfc_eqe_major_code, eqe),
15416 bf_get_le32(lpfc_eqe_minor_code, eqe));
15417 return;
15418 }
15419
15420 /* Get the reference to the corresponding CQ */
15421 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15422
15423 /* Use the fast lookup method first */
15424 if (cqid <= phba->sli4_hba.cq_max) {
15425 cq = phba->sli4_hba.cq_lookup[cqid];
15426 if (cq)
15427 goto work_cq;
15428 }
15429
15430 /* Next check for NVMET completion */
15431 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15432 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15433 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15434 /* Process NVMET unsol rcv */
15435 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15436 goto process_cq;
15437 }
15438 }
15439
15440 if (phba->sli4_hba.nvmels_cq &&
15441 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15442 /* Process NVME unsol rcv */
15443 cq = phba->sli4_hba.nvmels_cq;
15444 }
15445
15446 /* Otherwise this is a Slow path event */
15447 if (cq == NULL) {
15448 lpfc_sli4_sp_handle_eqe(phba, eqe,
15449 phba->sli4_hba.hdwq[qidx].hba_eq);
15450 return;
15451 }
15452
15453process_cq:
15454 if (unlikely(cqid != cq->queue_id)) {
15455 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15456 "0368 Miss-matched fast-path completion "
15457 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15458 cqid, cq->queue_id);
15459 return;
15460 }
15461
15462work_cq:
15463#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15464 if (phba->ktime_on)
15465 cq->isr_timestamp = ktime_get_ns();
15466 else
15467 cq->isr_timestamp = 0;
15468#endif
15469
15470 switch (poll_mode) {
15471 case LPFC_THREADED_IRQ:
15472 __lpfc_sli4_hba_process_cq(cq);
15473 break;
15474 case LPFC_QUEUE_WORK:
15475 default:
15476 if (is_kdump_kernel())
15477 ret = queue_work(phba->wq, &cq->irqwork);
15478 else
15479 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15480 if (!ret)
15481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15482 "0383 Cannot schedule queue work "
15483 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15484 cqid, cq->queue_id,
15485 raw_smp_processor_id());
15486 break;
15487 }
15488}
15489
15490/**
15491 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15492 * @work: pointer to work element
15493 *
15494 * Translates from the delayed work element to its queue and calls the fast-path handler.
15495 **/
15496static void
15497lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15498{
15499 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15500 struct lpfc_queue, sched_irqwork);
15501
15502 __lpfc_sli4_hba_process_cq(cq);
15503}
15504
15505/**
15506 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15507 * @irq: Interrupt number.
15508 * @dev_id: The device context pointer.
15509 *
15510 * This function is directly called from the PCI layer as an interrupt
15511 * service routine when device with SLI-4 interface spec is enabled with
15512 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15513 * ring event in the HBA. However, when the device is enabled with either
15514 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15515 * device-level interrupt handler. When the PCI slot is in error recovery
15516 * or the HBA is undergoing initialization, the interrupt handler will not
15517 * process the interrupt. The SCSI FCP fast-path ring events are handled in
15518 * the interrupt context. This function is called without any lock held.
15519 * It gets the hbalock to access and update SLI data structures. Note that
15520 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
15521 * equal to the FCP CQ index.
15522 *
15523 * The link attention and ELS ring attention events are handled
15524 * by the worker thread. The interrupt handler signals the worker thread
15525 * and returns for these events. This function is called without any lock
15526 * held. It gets the hbalock to access and update SLI data structures.
15527 *
15528 * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
15529 * when interrupt is scheduled to be handled from a threaded irq context, or
15530 * else returns IRQ_NONE.
15531 **/
15532irqreturn_t
15533lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15534{
15535 struct lpfc_hba *phba;
15536 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15537 struct lpfc_queue *fpeq;
15538 unsigned long iflag;
15539 int hba_eqidx;
15540 int ecount = 0;
15541 struct lpfc_eq_intr_info *eqi;
15542
15543 /* Get the driver's phba structure from the dev_id */
15544 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15545 phba = hba_eq_hdl->phba;
15546 hba_eqidx = hba_eq_hdl->idx;
15547
15548 if (unlikely(!phba))
15549 return IRQ_NONE;
15550 if (unlikely(!phba->sli4_hba.hdwq))
15551 return IRQ_NONE;
15552
15553 /* Get to the EQ struct associated with this vector */
15554 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15555 if (unlikely(!fpeq))
15556 return IRQ_NONE;
15557
15558 /* Check device state for handling interrupt */
15559 if (unlikely(lpfc_intr_state_check(phba))) {
15560 /* Check again for link_state with lock held */
15561 spin_lock_irqsave(&phba->hbalock, iflag);
15562 if (phba->link_state < LPFC_LINK_DOWN)
15563 /* Flush, clear interrupt, and rearm the EQ */
15564 lpfc_sli4_eqcq_flush(phba, fpeq);
15565 spin_unlock_irqrestore(&phba->hbalock, iflag);
15566 return IRQ_NONE;
15567 }
15568
15569 switch (fpeq->poll_mode) {
15570 case LPFC_THREADED_IRQ:
15571		/* CGN mgmt is mutually exclusive with irq processing */
15572 if (phba->cmf_active_mode == LPFC_CFG_OFF)
15573 return IRQ_WAKE_THREAD;
15574 fallthrough;
15575 case LPFC_QUEUE_WORK:
15576 default:
15577 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15578 eqi->icnt++;
15579
15580 fpeq->last_cpu = raw_smp_processor_id();
15581
15582 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15583 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15584 phba->cfg_auto_imax &&
15585 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15586 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15587 lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15588 LPFC_MAX_AUTO_EQ_DELAY);
15589
15590 /* process and rearm the EQ */
15591 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15592 LPFC_QUEUE_WORK);
15593
15594 if (unlikely(ecount == 0)) {
15595 fpeq->EQ_no_entry++;
15596 if (phba->intr_type == MSIX)
15597			/* Dedicated MSI-X vector fired with no EQE: unexpected */
15598 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15599 "0358 MSI-X interrupt with no EQE\n");
15600 else
15601			/* Shared (non MSI-X) line: interrupt was not ours */
15602 return IRQ_NONE;
15603 }
15604 }
15605
15606 return IRQ_HANDLED;
15607} /* lpfc_sli4_hba_intr_handler */
15608
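/*
 * Illustrative sketch (an assumption, not the driver's actual setup code):
 * how the hard handler above and the threaded handler
 * lpfc_sli4_hba_intr_handler_th(), defined later in this file, could be
 * wired together. Returning IRQ_WAKE_THREAD from the hard handler makes
 * the kernel run the threaded handler. The flags and name below are made
 * up for illustration only.
 */
static int __maybe_unused
lpfc_example_request_eq_irq(int irq, struct lpfc_hba_eq_hdl *eqhdl)
{
	return request_threaded_irq(irq, lpfc_sli4_hba_intr_handler,
				    lpfc_sli4_hba_intr_handler_th,
				    IRQF_ONESHOT, "lpfc:eq", eqhdl);
}
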
15609/**
15610 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15611 * @irq: Interrupt number.
15612 * @dev_id: The device context pointer.
15613 *
15614 * This function is the device-level interrupt handler to device with SLI-4
15615 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15616 * interrupt mode is enabled and there is an event in the HBA which requires
15617 * driver attention. This function invokes the slow-path interrupt attention
15618 * handling function and fast-path interrupt attention handling function in
15619 * turn to process the relevant HBA attention events. This function is called
15620 * without any lock held. It gets the hbalock to access and update SLI data
15621 * structures.
15622 *
15623 * This function returns IRQ_HANDLED when interrupt is handled, else it
15624 * returns IRQ_NONE.
15625 **/
15626irqreturn_t
15627lpfc_sli4_intr_handler(int irq, void *dev_id)
15628{
15629 struct lpfc_hba *phba;
15630 irqreturn_t hba_irq_rc;
15631 bool hba_handled = false;
15632 int qidx;
15633
15634 /* Get the driver's phba structure from the dev_id */
15635 phba = (struct lpfc_hba *)dev_id;
15636
15637 if (unlikely(!phba))
15638 return IRQ_NONE;
15639
15640 /*
15641 * Invoke fast-path host attention interrupt handling as appropriate.
15642 */
15643 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15644 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15645 &phba->sli4_hba.hba_eq_hdl[qidx]);
15646 if (hba_irq_rc == IRQ_HANDLED)
15647 hba_handled |= true;
15648 }
15649
15650 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15651} /* lpfc_sli4_intr_handler */
15652
15653void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15654{
15655 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15656 struct lpfc_queue *eq;
15657
15658 rcu_read_lock();
15659
15660 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15661 lpfc_sli4_poll_eq(eq);
15662 if (!list_empty(&phba->poll_list))
15663 mod_timer(&phba->cpuhp_poll_timer,
15664 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15665
15666 rcu_read_unlock();
15667}
15668
15669static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15670{
15671 struct lpfc_hba *phba = eq->phba;
15672
15673 /* kickstart slowpath processing if needed */
15674 if (list_empty(&phba->poll_list))
15675 mod_timer(&phba->cpuhp_poll_timer,
15676 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15677
15678 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15679 synchronize_rcu();
15680}
15681
15682static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15683{
15684 struct lpfc_hba *phba = eq->phba;
15685
15686	/* Disable slow-path processing for this eq, then kick-start the
15687	 * eq by re-arming it ASAP.
15688	 */
15689 list_del_rcu(&eq->_poll_list);
15690 synchronize_rcu();
15691
15692 if (list_empty(&phba->poll_list))
15693 del_timer_sync(&phba->cpuhp_poll_timer);
15694}
15695
15696void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15697{
15698 struct lpfc_queue *eq, *next;
15699
15700 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15701 list_del(&eq->_poll_list);
15702
15703 INIT_LIST_HEAD(&phba->poll_list);
15704 synchronize_rcu();
15705}
15706
15707static inline void
15708__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15709{
15710 if (mode == eq->mode)
15711 return;
15712	/*
15713	 * Currently this function is only called during a hotplug
15714	 * event and the cpu on which this function is executing
15715	 * is going offline. By now the hotplug code has instructed
15716	 * the scheduler to remove this cpu from the cpu active mask,
15717	 * so we don't need to worry about being put aside by the
15718	 * scheduler for a high-priority process. Interrupts could
15719	 * still arrive, but they are known to retire ASAP.
15720	 */
15721
15722 /* Disable polling in the fastpath */
15723 WRITE_ONCE(eq->mode, mode);
15724 /* flush out the store buffer */
15725 smp_wmb();
15726
15727	/*
15728	 * Add this eq to the polling list and start polling. For
15729	 * a grace period both the interrupt handler and the poller
15730	 * will try to process the eq, but that's fine. We have a
15731	 * synchronization mechanism in place (queue_claimed) to
15732	 * deal with it. This is just a draining phase for the
15733	 * interrupt handler (not the eq's) as we have guaranteed,
15734	 * through the barrier, that all CPUs have seen the new
15735	 * CQ_POLLED state, which effectively disables REARMING of
15736	 * the EQ. The whole idea is that the eq's eventually die
15737	 * off, as we are no longer rearming them.
15738	 */
15739 mode ? lpfc_sli4_add_to_poll_list(eq) :
15740 lpfc_sli4_remove_from_poll_list(eq);
15741}
15742
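/*
 * Illustrative sketch (hypothetical, not driver code): the publisher above
 * uses WRITE_ONCE() plus smp_wmb() to make the new mode visible; a lockless
 * reader on the fast path would observe it with READ_ONCE(), as below.
 */
static inline bool __maybe_unused
lpfc_example_eq_is_polled(struct lpfc_queue *eq)
{
	/* Pairs with the WRITE_ONCE()/smp_wmb() in __lpfc_sli4_switch_eqmode */
	return READ_ONCE(eq->mode) == LPFC_EQ_POLL;
}
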
15743void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15744{
15745 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15746}
15747
15748void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15749{
15750 struct lpfc_hba *phba = eq->phba;
15751
15752 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15753
15754	/* Kick-start the pending I/Os in hardware.
15755	 * Once we switch back to interrupt processing on an eq,
15756	 * the I/O completion path only rearms the eq when it
15757	 * receives a completion. But since the eq is in a
15758	 * disarmed state, no completion ever arrives. That
15759	 * would create a deadlock scenario.
15760 */
15761 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15762}
15763
15764/**
15765 * lpfc_sli4_queue_free - free a queue structure and associated memory
15766 * @queue: The queue structure to free.
15767 *
15768 * This function frees a queue structure and the DMAable memory used for
15769 * the host resident queue. This function must be called after destroying the
15770 * queue on the HBA.
15771 **/
15772void
15773lpfc_sli4_queue_free(struct lpfc_queue *queue)
15774{
15775 struct lpfc_dmabuf *dmabuf;
15776
15777 if (!queue)
15778 return;
15779
15780 if (!list_empty(&queue->wq_list))
15781 list_del(&queue->wq_list);
15782
15783 while (!list_empty(&queue->page_list)) {
15784 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15785 list);
15786 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15787 dmabuf->virt, dmabuf->phys);
15788 kfree(dmabuf);
15789 }
15790 if (queue->rqbp) {
15791 lpfc_free_rq_buffer(queue->phba, queue);
15792 kfree(queue->rqbp);
15793 }
15794
15795 if (!list_empty(&queue->cpu_list))
15796 list_del(&queue->cpu_list);
15797
15798 kfree(queue);
15799 return;
15800}
15801
15802/**
15803 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15804 * @phba: The HBA that this queue is being created on.
15805 * @page_size: The size of a queue page
15806 * @entry_size: The size of each queue entry for this queue.
15807 * @entry_count: The number of entries that this queue will handle.
15808 * @cpu: The cpu that will primarily utilize this queue.
15809 *
15810 * This function allocates a queue structure and the DMAable memory used for
15811 * the host resident queue. This function must be called before creating the
15812 * queue on the HBA.
15813 **/
15814struct lpfc_queue *
15815lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15816 uint32_t entry_size, uint32_t entry_count, int cpu)
15817{
15818 struct lpfc_queue *queue;
15819 struct lpfc_dmabuf *dmabuf;
15820 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15821 uint16_t x, pgcnt;
15822
15823 if (!phba->sli4_hba.pc_sli4_params.supported)
15824 hw_page_size = page_size;
15825
15826 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15827
15828	/* If needed, adjust page count to match the max the adapter supports */
15829 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15830 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15831
15832 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15833 GFP_KERNEL, cpu_to_node(cpu));
15834 if (!queue)
15835 return NULL;
15836
15837 INIT_LIST_HEAD(&queue->list);
15838 INIT_LIST_HEAD(&queue->_poll_list);
15839 INIT_LIST_HEAD(&queue->wq_list);
15840 INIT_LIST_HEAD(&queue->wqfull_list);
15841 INIT_LIST_HEAD(&queue->page_list);
15842 INIT_LIST_HEAD(&queue->child_list);
15843 INIT_LIST_HEAD(&queue->cpu_list);
15844
15845 /* Set queue parameters now. If the system cannot provide memory
15846 * resources, the free routine needs to know what was allocated.
15847 */
15848 queue->page_count = pgcnt;
15849 queue->q_pgs = (void **)&queue[1];
15850 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15851 queue->entry_size = entry_size;
15852 queue->entry_count = entry_count;
15853 queue->page_size = hw_page_size;
15854 queue->phba = phba;
15855
15856 for (x = 0; x < queue->page_count; x++) {
15857 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15858 dev_to_node(&phba->pcidev->dev));
15859 if (!dmabuf)
15860 goto out_fail;
15861 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15862 hw_page_size, &dmabuf->phys,
15863 GFP_KERNEL);
15864 if (!dmabuf->virt) {
15865 kfree(dmabuf);
15866 goto out_fail;
15867 }
15868 dmabuf->buffer_tag = x;
15869 list_add_tail(&dmabuf->list, &queue->page_list);
15870		/* use lpfc_sli4_qe to index a particular entry in this page */
15871 queue->q_pgs[x] = dmabuf->virt;
15872 }
15873 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15874 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15875 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15876 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15877
15878 /* notify_interval will be set during q creation */
15879
15880 return queue;
15881out_fail:
15882 lpfc_sli4_queue_free(queue);
15883 return NULL;
15884}
15885
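/*
 * Illustrative usage sketch (hypothetical, not driver code): pairing the
 * two helpers above. With 64-byte entries and 1024 entries on 4 KB pages,
 * lpfc_sli4_queue_alloc() rounds 65536 bytes up to 16 DMA pages. The sizes
 * below are made-up example values, not the driver's configuration.
 */
static int __maybe_unused
lpfc_example_queue_roundtrip(struct lpfc_hba *phba)
{
	struct lpfc_queue *cq;

	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 64, 1024,
				   raw_smp_processor_id());
	if (!cq)
		return -ENOMEM;

	/* ... a real caller would create/destroy the queue on the HBA ... */

	lpfc_sli4_queue_free(cq);
	return 0;
}
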
15886/**
15887 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15888 * @phba: HBA structure that indicates port to create a queue on.
15889 * @pci_barset: PCI BAR set flag.
15890 *
15891 * This function performs the iomap of the specified PCI BAR set to a host
15892 * memory address, if not already done, and returns that address. The
15893 * returned host memory address can be NULL.
15894 */
15895static void __iomem *
15896lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15897{
15898 if (!phba->pcidev)
15899 return NULL;
15900
15901 switch (pci_barset) {
15902 case WQ_PCI_BAR_0_AND_1:
15903 return phba->pci_bar0_memmap_p;
15904 case WQ_PCI_BAR_2_AND_3:
15905 return phba->pci_bar2_memmap_p;
15906 case WQ_PCI_BAR_4_AND_5:
15907 return phba->pci_bar4_memmap_p;
15908 default:
15909 break;
15910 }
15911 return NULL;
15912}
15913
15914/**
15915 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15916 * @phba: HBA structure that EQs are on.
15917 * @startq: The starting EQ index to modify
15918 * @numq: The number of EQs (consecutive indexes) to modify
15919 * @usdelay: amount of delay
15920 *
15921 * This function revises the EQ delay on 1 or more EQs. The EQ delay
15922 * is set either by writing to a register (if supported by the SLI Port)
15923 * or by mailbox command. The mailbox command allows several EQs to be
15924 * updated at once.
15925 *
15926 * The @phba struct is used to send a mailbox command to HBA. The @startq
15927 * is used to get the starting EQ index to change. The @numq value is
15928 * used to specify how many consecutive EQ indexes, starting at EQ index,
15929 * are to be changed. This function is synchronous and waits for any
15930 * mailbox command to finish before returning.
15931 *
15932 * This function returns nothing. If the mailbox cannot be allocated, or
15933 * the MODIFY_EQ_DELAY mailbox command fails, the error is logged and the
15934 * delay is left unchanged on the affected EQs. Note that when the
15935 * EQ_DELAY register is supported, each EQ is instead updated directly.
15936 **/
15937void
15938lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15939 uint32_t numq, uint32_t usdelay)
15940{
15941 struct lpfc_mbx_modify_eq_delay *eq_delay;
15942 LPFC_MBOXQ_t *mbox;
15943 struct lpfc_queue *eq;
15944 int cnt = 0, rc, length;
15945 uint32_t shdr_status, shdr_add_status;
15946 uint32_t dmult;
15947 int qidx;
15948 union lpfc_sli4_cfg_shdr *shdr;
15949
15950 if (startq >= phba->cfg_irq_chann)
15951 return;
15952
15953 if (usdelay > 0xFFFF) {
15954 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15955 "6429 usdelay %d too large. Scaled down to "
15956 "0xFFFF.\n", usdelay);
15957 usdelay = 0xFFFF;
15958 }
15959
15960 /* set values by EQ_DELAY register if supported */
15961 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15962 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15963 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15964 if (!eq)
15965 continue;
15966
15967 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15968
15969 if (++cnt >= numq)
15970 break;
15971 }
15972 return;
15973 }
15974
15975 /* Otherwise, set values by mailbox cmd */
15976
15977 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15978 if (!mbox) {
15979 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15980 "6428 Failed allocating mailbox cmd buffer."
15981 " EQ delay was not set.\n");
15982 return;
15983 }
15984 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15985 sizeof(struct lpfc_sli4_cfg_mhdr));
15986 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15987 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15988 length, LPFC_SLI4_MBX_EMBED);
15989 eq_delay = &mbox->u.mqe.un.eq_delay;
15990
15991	/* Calculate the delay multiplier from the maximum interrupts per second */
15992 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15993 if (dmult)
15994 dmult--;
15995 if (dmult > LPFC_DMULT_MAX)
15996 dmult = LPFC_DMULT_MAX;
15997
15998 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15999 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16000 if (!eq)
16001 continue;
16002 eq->q_mode = usdelay;
16003 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16004 eq_delay->u.request.eq[cnt].phase = 0;
16005 eq_delay->u.request.eq[cnt].delay_multi = dmult;
16006
16007 if (++cnt >= numq)
16008 break;
16009 }
16010 eq_delay->u.request.num_eq = cnt;
16011
16012 mbox->vport = phba->pport;
16013 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16014 mbox->ctx_ndlp = NULL;
16015 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16016 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16017 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16018 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16019 if (shdr_status || shdr_add_status || rc) {
16020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16021 "2512 MODIFY_EQ_DELAY mailbox failed with "
16022 "status x%x add_status x%x, mbx status x%x\n",
16023 shdr_status, shdr_add_status, rc);
16024 }
16025 mempool_free(mbox, phba->mbox_mem_pool);
16026 return;
16027}
16028
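/*
 * Illustrative sketch (hypothetical helper, not driver code): the delay
 * multiplier math used by lpfc_modify_hba_eq_delay() above, isolated so
 * the scaling is easy to see. Same constants as the function itself.
 */
static inline uint32_t __maybe_unused
lpfc_example_usdelay_to_dmult(uint32_t usdelay)
{
	uint32_t dmult;

	/* Scale microseconds of delay into the hardware multiplier */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	/* Clamp to the largest multiplier the hardware accepts */
	return min_t(uint32_t, dmult, LPFC_DMULT_MAX);
}
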
16029/**
16030 * lpfc_eq_create - Create an Event Queue on the HBA
16031 * @phba: HBA structure that indicates port to create a queue on.
16032 * @eq: The queue structure to use to create the event queue.
16033 * @imax: The maximum interrupt per second limit.
16034 *
16035 * This function creates an event queue, as detailed in @eq, on a port,
16036 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16037 *
16038 * The @phba struct is used to send mailbox command to HBA. The @eq struct
16039 * is used to get the entry count and entry size that are necessary to
16040 * determine the number of pages to allocate and use for this queue. This
16041 * function will send the EQ_CREATE mailbox command to the HBA to setup the
16042 * event queue. This function is synchronous and will wait for the mailbox
16043 * command to finish before continuing.
16044 *
16045 * On success this function will return a zero. If unable to allocate enough
16046 * memory this function will return -ENOMEM. If the queue create mailbox command
16047 * fails this function will return -ENXIO.
16048 **/
16049int
16050lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16051{
16052 struct lpfc_mbx_eq_create *eq_create;
16053 LPFC_MBOXQ_t *mbox;
16054 int rc, length, status = 0;
16055 struct lpfc_dmabuf *dmabuf;
16056 uint32_t shdr_status, shdr_add_status;
16057 union lpfc_sli4_cfg_shdr *shdr;
16058 uint16_t dmult;
16059 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16060
16061 /* sanity check on queue memory */
16062 if (!eq)
16063 return -ENODEV;
16064 if (!phba->sli4_hba.pc_sli4_params.supported)
16065 hw_page_size = SLI4_PAGE_SIZE;
16066
16067 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16068 if (!mbox)
16069 return -ENOMEM;
16070 length = (sizeof(struct lpfc_mbx_eq_create) -
16071 sizeof(struct lpfc_sli4_cfg_mhdr));
16072 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16073 LPFC_MBOX_OPCODE_EQ_CREATE,
16074 length, LPFC_SLI4_MBX_EMBED);
16075 eq_create = &mbox->u.mqe.un.eq_create;
16076 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16077 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16078 eq->page_count);
16079 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16080 LPFC_EQE_SIZE);
16081 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16082
16083 /* Use version 2 of CREATE_EQ if eqav is set */
16084 if (phba->sli4_hba.pc_sli4_params.eqav) {
16085 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16086 LPFC_Q_CREATE_VERSION_2);
16087 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16088 phba->sli4_hba.pc_sli4_params.eqav);
16089 }
16090
16091 /* don't setup delay multiplier using EQ_CREATE */
16092 dmult = 0;
16093 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16094 dmult);
16095 switch (eq->entry_count) {
16096 default:
16097 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16098 "0360 Unsupported EQ count. (%d)\n",
16099 eq->entry_count);
16100 if (eq->entry_count < 256) {
16101 status = -EINVAL;
16102 goto out;
16103 }
16104 fallthrough; /* otherwise default to smallest count */
16105 case 256:
16106 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16107 LPFC_EQ_CNT_256);
16108 break;
16109 case 512:
16110 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16111 LPFC_EQ_CNT_512);
16112 break;
16113 case 1024:
16114 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16115 LPFC_EQ_CNT_1024);
16116 break;
16117 case 2048:
16118 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16119 LPFC_EQ_CNT_2048);
16120 break;
16121 case 4096:
16122 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16123 LPFC_EQ_CNT_4096);
16124 break;
16125 }
16126 list_for_each_entry(dmabuf, &eq->page_list, list) {
16127 memset(dmabuf->virt, 0, hw_page_size);
16128 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16129 putPaddrLow(dmabuf->phys);
16130 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16131 putPaddrHigh(dmabuf->phys);
16132 }
16133 mbox->vport = phba->pport;
16134 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16135 mbox->ctx_buf = NULL;
16136 mbox->ctx_ndlp = NULL;
16137 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16138 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16139 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16140 if (shdr_status || shdr_add_status || rc) {
16141 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16142 "2500 EQ_CREATE mailbox failed with "
16143 "status x%x add_status x%x, mbx status x%x\n",
16144 shdr_status, shdr_add_status, rc);
16145 status = -ENXIO;
16146 }
16147 eq->type = LPFC_EQ;
16148 eq->subtype = LPFC_NONE;
16149 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16150 if (eq->queue_id == 0xFFFF)
16151 status = -ENXIO;
16152 eq->host_index = 0;
16153 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16154 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16155out:
16156 mempool_free(mbox, phba->mbox_mem_pool);
16157 return status;
16158}
16159
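/*
 * Illustrative sketch (hypothetical helper, not driver code): the mailbox
 * completion check repeated by each queue-create routine in this file.
 * The real code keeps it inline so every path can log its own message.
 */
static inline int __maybe_unused
lpfc_example_mbx_status(union lpfc_sli4_cfg_shdr *shdr, int rc)
{
	uint32_t shdr_status, shdr_add_status;

	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	/* Any nonzero status means the queue was not created */
	return (shdr_status || shdr_add_status || rc) ? -ENXIO : 0;
}
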
16160/**
16161 * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
16162 * @irq: Interrupt number.
16163 * @dev_id: The device context pointer.
16164 *
16165 * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
16166 * threaded irq context.
16167 *
16168 * Returns
16169 * IRQ_HANDLED - interrupt is handled
16170 * IRQ_NONE - otherwise
16171 **/
16172irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
16173{
16174 struct lpfc_hba *phba;
16175 struct lpfc_hba_eq_hdl *hba_eq_hdl;
16176 struct lpfc_queue *fpeq;
16177 int ecount = 0;
16178 int hba_eqidx;
16179 struct lpfc_eq_intr_info *eqi;
16180
16181 /* Get the driver's phba structure from the dev_id */
16182 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
16183 phba = hba_eq_hdl->phba;
16184 hba_eqidx = hba_eq_hdl->idx;
16185
16186 if (unlikely(!phba))
16187 return IRQ_NONE;
16188 if (unlikely(!phba->sli4_hba.hdwq))
16189 return IRQ_NONE;
16190
16191 /* Get to the EQ struct associated with this vector */
16192 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16193 if (unlikely(!fpeq))
16194 return IRQ_NONE;
16195
16196 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
16197 eqi->icnt++;
16198
16199 fpeq->last_cpu = raw_smp_processor_id();
16200
16201 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
16202 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
16203 phba->cfg_auto_imax &&
16204 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
16205 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
16206 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
16207
16208 /* process and rearm the EQ */
16209 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
16210 LPFC_THREADED_IRQ);
16211
16212 if (unlikely(ecount == 0)) {
16213 fpeq->EQ_no_entry++;
16214 if (phba->intr_type == MSIX)
16215		/* Dedicated MSI-X vector fired with no EQE: unexpected */
16216 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16217 "3358 MSI-X interrupt with no EQE\n");
16218 else
16219		/* Shared (non MSI-X) line: interrupt was not ours */
16220 return IRQ_NONE;
16221 }
16222 return IRQ_HANDLED;
16223}
16224
16225/**
16226 * lpfc_cq_create - Create a Completion Queue on the HBA
16227 * @phba: HBA structure that indicates port to create a queue on.
16228 * @cq: The queue structure to use to create the completion queue.
16229 * @eq: The event queue to bind this completion queue to.
16230 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16231 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16232 *
16233 * This function creates a completion queue, as detailed in @cq, on a port,
16234 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16235 *
16236 * The @phba struct is used to send mailbox command to HBA. The @cq struct
16237 * is used to get the entry count and entry size that are necessary to
16238 * determine the number of pages to allocate and use for this queue. The @eq
16239 * is used to indicate which event queue to bind this completion queue to. This
16240 * function will send the CQ_CREATE mailbox command to the HBA to setup the
16241 * completion queue. This function is synchronous and will wait for the mailbox
16242 * command to finish before continuing.
16243 *
16244 * On success this function will return a zero. If unable to allocate enough
16245 * memory this function will return -ENOMEM. If the queue create mailbox command
16246 * fails this function will return -ENXIO.
16247 **/
16248int
16249lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16250 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16251{
16252 struct lpfc_mbx_cq_create *cq_create;
16253 struct lpfc_dmabuf *dmabuf;
16254 LPFC_MBOXQ_t *mbox;
16255 int rc, length, status = 0;
16256 uint32_t shdr_status, shdr_add_status;
16257 union lpfc_sli4_cfg_shdr *shdr;
16258
16259 /* sanity check on queue memory */
16260 if (!cq || !eq)
16261 return -ENODEV;
16262
16263 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16264 if (!mbox)
16265 return -ENOMEM;
16266 length = (sizeof(struct lpfc_mbx_cq_create) -
16267 sizeof(struct lpfc_sli4_cfg_mhdr));
16268 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16269 LPFC_MBOX_OPCODE_CQ_CREATE,
16270 length, LPFC_SLI4_MBX_EMBED);
16271 cq_create = &mbox->u.mqe.un.cq_create;
16272 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16273 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16274 cq->page_count);
16275 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16276 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16277 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16278 phba->sli4_hba.pc_sli4_params.cqv);
16279 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16280 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16281 (cq->page_size / SLI4_PAGE_SIZE));
16282 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16283 eq->queue_id);
16284 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16285 phba->sli4_hba.pc_sli4_params.cqav);
16286 } else {
16287 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16288 eq->queue_id);
16289 }
16290 switch (cq->entry_count) {
16291 case 2048:
16292 case 4096:
16293 if (phba->sli4_hba.pc_sli4_params.cqv ==
16294 LPFC_Q_CREATE_VERSION_2) {
16295 cq_create->u.request.context.lpfc_cq_context_count =
16296 cq->entry_count;
16297 bf_set(lpfc_cq_context_count,
16298 &cq_create->u.request.context,
16299 LPFC_CQ_CNT_WORD7);
16300 break;
16301 }
16302 fallthrough;
16303 default:
16304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16305 "0361 Unsupported CQ count: "
16306 "entry cnt %d sz %d pg cnt %d\n",
16307 cq->entry_count, cq->entry_size,
16308 cq->page_count);
16309 if (cq->entry_count < 256) {
16310 status = -EINVAL;
16311 goto out;
16312 }
16313 fallthrough; /* otherwise default to smallest count */
16314 case 256:
16315 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16316 LPFC_CQ_CNT_256);
16317 break;
16318 case 512:
16319 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16320 LPFC_CQ_CNT_512);
16321 break;
16322 case 1024:
16323 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16324 LPFC_CQ_CNT_1024);
16325 break;
16326 }
16327 list_for_each_entry(dmabuf, &cq->page_list, list) {
16328 memset(dmabuf->virt, 0, cq->page_size);
16329 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16330 putPaddrLow(dmabuf->phys);
16331 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16332 putPaddrHigh(dmabuf->phys);
16333 }
16334 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16335
16336 /* The IOCTL status is embedded in the mailbox subheader. */
16337 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16338 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16339 if (shdr_status || shdr_add_status || rc) {
16340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16341 "2501 CQ_CREATE mailbox failed with "
16342 "status x%x add_status x%x, mbx status x%x\n",
16343 shdr_status, shdr_add_status, rc);
16344 status = -ENXIO;
16345 goto out;
16346 }
16347 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16348 if (cq->queue_id == 0xFFFF) {
16349 status = -ENXIO;
16350 goto out;
16351 }
16352 /* link the cq onto the parent eq child list */
16353 list_add_tail(&cq->list, &eq->child_list);
16354 /* Set up completion queue's type and subtype */
16355 cq->type = type;
16356 cq->subtype = subtype;
16357 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16358 cq->assoc_qid = eq->queue_id;
16359 cq->assoc_qp = eq;
16360 cq->host_index = 0;
16361 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16362 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16363
16364 if (cq->queue_id > phba->sli4_hba.cq_max)
16365 phba->sli4_hba.cq_max = cq->queue_id;
16366out:
16367 mempool_free(mbox, phba->mbox_mem_pool);
16368 return status;
16369}
16370
16371/**
16372 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16373 * @phba: HBA structure that indicates port to create a queue on.
16374 * @cqp: The queue structure array to use to create the completion queues.
16375 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16376 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16377 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16378 *
16379 * This function creates a set of completion queues to support MRQ,
16380 * as detailed in @cqp, on a port
16381 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
16382 *
16383 * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
16384 * entry is used to get the entry count and entry size that are necessary to
16385 * determine the number of pages to allocate and use for that queue. The EQs
16386 * in @hdwq indicate which event queue to bind each completion queue to. This
16387 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
16388 * completion queue. This function is synchronous and will wait for the mailbox
16389 * command to finish before continuing.
16390 *
16391 * On success this function will return a zero. If unable to allocate enough
16392 * memory this function will return -ENOMEM. If the queue create mailbox command
16393 * fails this function will return -ENXIO.
16394 **/
16395int
16396lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16397 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16398 uint32_t subtype)
16399{
16400 struct lpfc_queue *cq;
16401 struct lpfc_queue *eq;
16402 struct lpfc_mbx_cq_create_set *cq_set;
16403 struct lpfc_dmabuf *dmabuf;
16404 LPFC_MBOXQ_t *mbox;
16405 int rc, length, alloclen, status = 0;
16406 int cnt, idx, numcq, page_idx = 0;
16407 uint32_t shdr_status, shdr_add_status;
16408 union lpfc_sli4_cfg_shdr *shdr;
16409 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16410
16411 /* sanity check on queue memory */
16412 numcq = phba->cfg_nvmet_mrq;
16413 if (!cqp || !hdwq || !numcq)
16414 return -ENODEV;
16415
16416 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16417 if (!mbox)
16418 return -ENOMEM;
16419
16420 length = sizeof(struct lpfc_mbx_cq_create_set);
16421 length += ((numcq * cqp[0]->page_count) *
16422 sizeof(struct dma_address));
16423 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16424 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16425 LPFC_SLI4_MBX_NEMBED);
16426 if (alloclen < length) {
16427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16428 "3098 Allocated DMA memory size (%d) is "
16429 "less than the requested DMA memory size "
16430 "(%d)\n", alloclen, length);
16431 status = -ENOMEM;
16432 goto out;
16433 }
16434 cq_set = mbox->sge_array->addr[0];
16435 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16436 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16437
16438 for (idx = 0; idx < numcq; idx++) {
16439 cq = cqp[idx];
16440 eq = hdwq[idx].hba_eq;
16441 if (!cq || !eq) {
16442 status = -ENOMEM;
16443 goto out;
16444 }
16445 if (!phba->sli4_hba.pc_sli4_params.supported)
16446 hw_page_size = cq->page_size;
16447
16448 switch (idx) {
16449 case 0:
16450 bf_set(lpfc_mbx_cq_create_set_page_size,
16451 &cq_set->u.request,
16452 (hw_page_size / SLI4_PAGE_SIZE));
16453 bf_set(lpfc_mbx_cq_create_set_num_pages,
16454 &cq_set->u.request, cq->page_count);
16455 bf_set(lpfc_mbx_cq_create_set_evt,
16456 &cq_set->u.request, 1);
16457 bf_set(lpfc_mbx_cq_create_set_valid,
16458 &cq_set->u.request, 1);
16459 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16460 &cq_set->u.request, 0);
16461 bf_set(lpfc_mbx_cq_create_set_num_cq,
16462 &cq_set->u.request, numcq);
16463 bf_set(lpfc_mbx_cq_create_set_autovalid,
16464 &cq_set->u.request,
16465 phba->sli4_hba.pc_sli4_params.cqav);
16466 switch (cq->entry_count) {
16467 case 2048:
16468 case 4096:
16469 if (phba->sli4_hba.pc_sli4_params.cqv ==
16470 LPFC_Q_CREATE_VERSION_2) {
16471 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16472 &cq_set->u.request,
16473 cq->entry_count);
16474 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16475 &cq_set->u.request,
16476 LPFC_CQ_CNT_WORD7);
16477 break;
16478 }
16479 fallthrough;
16480 default:
16481 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16482 "3118 Bad CQ count. (%d)\n",
16483 cq->entry_count);
16484 if (cq->entry_count < 256) {
16485 status = -EINVAL;
16486 goto out;
16487 }
16488 fallthrough; /* otherwise default to smallest */
16489 case 256:
16490 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16491 &cq_set->u.request, LPFC_CQ_CNT_256);
16492 break;
16493 case 512:
16494 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16495 &cq_set->u.request, LPFC_CQ_CNT_512);
16496 break;
16497 case 1024:
16498 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16499 &cq_set->u.request, LPFC_CQ_CNT_1024);
16500 break;
16501 }
16502 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16503 &cq_set->u.request, eq->queue_id);
16504 break;
16505 case 1:
16506 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16507 &cq_set->u.request, eq->queue_id);
16508 break;
16509 case 2:
16510 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16511 &cq_set->u.request, eq->queue_id);
16512 break;
16513 case 3:
16514 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16515 &cq_set->u.request, eq->queue_id);
16516 break;
16517 case 4:
16518 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16519 &cq_set->u.request, eq->queue_id);
16520 break;
16521 case 5:
16522 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16523 &cq_set->u.request, eq->queue_id);
16524 break;
16525 case 6:
16526 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16527 &cq_set->u.request, eq->queue_id);
16528 break;
16529 case 7:
16530 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16531 &cq_set->u.request, eq->queue_id);
16532 break;
16533 case 8:
16534 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16535 &cq_set->u.request, eq->queue_id);
16536 break;
16537 case 9:
16538 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16539 &cq_set->u.request, eq->queue_id);
16540 break;
16541 case 10:
16542 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16543 &cq_set->u.request, eq->queue_id);
16544 break;
16545 case 11:
16546 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16547 &cq_set->u.request, eq->queue_id);
16548 break;
16549 case 12:
16550 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16551 &cq_set->u.request, eq->queue_id);
16552 break;
16553 case 13:
16554 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16555 &cq_set->u.request, eq->queue_id);
16556 break;
16557 case 14:
16558 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16559 &cq_set->u.request, eq->queue_id);
16560 break;
16561 case 15:
16562 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16563 &cq_set->u.request, eq->queue_id);
16564 break;
16565 }
16566
16567 /* link the cq onto the parent eq child list */
16568 list_add_tail(&cq->list, &eq->child_list);
16569 /* Set up completion queue's type and subtype */
16570 cq->type = type;
16571 cq->subtype = subtype;
16572 cq->assoc_qid = eq->queue_id;
16573 cq->assoc_qp = eq;
16574 cq->host_index = 0;
16575 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16576 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16577 cq->entry_count);
16578 cq->chann = idx;
16579
16580 rc = 0;
16581 list_for_each_entry(dmabuf, &cq->page_list, list) {
16582 memset(dmabuf->virt, 0, hw_page_size);
16583 cnt = page_idx + dmabuf->buffer_tag;
16584 cq_set->u.request.page[cnt].addr_lo =
16585 putPaddrLow(dmabuf->phys);
16586 cq_set->u.request.page[cnt].addr_hi =
16587 putPaddrHigh(dmabuf->phys);
16588 rc++;
16589 }
16590 page_idx += rc;
16591 }
16592
16593 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16594
16595 /* The IOCTL status is embedded in the mailbox subheader. */
16596 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16597 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16598 if (shdr_status || shdr_add_status || rc) {
16599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16600 "3119 CQ_CREATE_SET mailbox failed with "
16601 "status x%x add_status x%x, mbx status x%x\n",
16602 shdr_status, shdr_add_status, rc);
16603 status = -ENXIO;
16604 goto out;
16605 }
16606 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16607 if (rc == 0xFFFF) {
16608 status = -ENXIO;
16609 goto out;
16610 }
16611
16612 for (idx = 0; idx < numcq; idx++) {
16613 cq = cqp[idx];
16614 cq->queue_id = rc + idx;
16615 if (cq->queue_id > phba->sli4_hba.cq_max)
16616 phba->sli4_hba.cq_max = cq->queue_id;
16617 }
16618
16619out:
16620 lpfc_sli4_mbox_cmd_free(phba, mbox);
16621 return status;
16622}
16623
16624/**
16625 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
16626 * @phba: HBA structure that indicates port to create a queue on.
16627 * @mq: The queue structure to use to create the mailbox queue.
16628 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16629 * @cq: The completion queue to associate with this mq.
16630 *
16631 * This function provides failback (fb) functionality when the
16632 * mq_create_ext fails on older FW generations. Its purpose is otherwise
16633 * identical to mq_create_ext.
16634 *
16635 * This routine cannot fail as all attributes were previously accessed and
16636 * initialized in mq_create_ext.
16637 **/
16638static void
16639lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16640 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16641{
16642 struct lpfc_mbx_mq_create *mq_create;
16643 struct lpfc_dmabuf *dmabuf;
16644 int length;
16645
16646 length = (sizeof(struct lpfc_mbx_mq_create) -
16647 sizeof(struct lpfc_sli4_cfg_mhdr));
16648 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16649 LPFC_MBOX_OPCODE_MQ_CREATE,
16650 length, LPFC_SLI4_MBX_EMBED);
16651 mq_create = &mbox->u.mqe.un.mq_create;
16652 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16653 mq->page_count);
16654 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16655 cq->queue_id);
16656 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16657 switch (mq->entry_count) {
16658 case 16:
16659 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16660 LPFC_MQ_RING_SIZE_16);
16661 break;
16662 case 32:
16663 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16664 LPFC_MQ_RING_SIZE_32);
16665 break;
16666 case 64:
16667 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16668 LPFC_MQ_RING_SIZE_64);
16669 break;
16670 case 128:
16671 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16672 LPFC_MQ_RING_SIZE_128);
16673 break;
16674 }
16675 list_for_each_entry(dmabuf, &mq->page_list, list) {
16676 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16677 putPaddrLow(dmabuf->phys);
16678 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16679 putPaddrHigh(dmabuf->phys);
16680 }
16681}
16682
16683/**
16684 * lpfc_mq_create - Create a mailbox Queue on the HBA
16685 * @phba: HBA structure that indicates port to create a queue on.
16686 * @mq: The queue structure to use to create the mailbox queue.
16687 * @cq: The completion queue to associate with this mq.
16688 * @subtype: The queue's subtype.
16689 *
16690 * This function creates a mailbox queue, as detailed in @mq, on a port,
16691 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16692 *
16693 * The @phba struct is used to send the mailbox command to the HBA. The @mq
16694 * struct is used to get the entry count and entry size that are necessary to
16695 * determine the number of pages to allocate and use for this queue. This
16696 * function will send the MQ_CREATE mailbox command to the HBA to setup the
16697 * mailbox queue. This function is synchronous and will wait for the mailbox
16698 * command to finish before continuing.
16699 *
16700 * On success this function will return a zero. If unable to allocate enough
16701 * memory this function will return -ENOMEM. If the queue create mailbox command
16702 * fails this function will return -ENXIO.
16703 **/
16704int32_t
16705lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16706 struct lpfc_queue *cq, uint32_t subtype)
16707{
16708 struct lpfc_mbx_mq_create *mq_create;
16709 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16710 struct lpfc_dmabuf *dmabuf;
16711 LPFC_MBOXQ_t *mbox;
16712 int rc, length, status = 0;
16713 uint32_t shdr_status, shdr_add_status;
16714 union lpfc_sli4_cfg_shdr *shdr;
16715 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16716
16717 /* sanity check on queue memory */
16718 if (!mq || !cq)
16719 return -ENODEV;
16720 if (!phba->sli4_hba.pc_sli4_params.supported)
16721 hw_page_size = SLI4_PAGE_SIZE;
16722
16723 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16724 if (!mbox)
16725 return -ENOMEM;
16726 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16727 sizeof(struct lpfc_sli4_cfg_mhdr));
16728 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16729 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16730 length, LPFC_SLI4_MBX_EMBED);
16731
16732 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16733 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16734 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16735 &mq_create_ext->u.request, mq->page_count);
16736 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16737 &mq_create_ext->u.request, 1);
16738 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16739 &mq_create_ext->u.request, 1);
16740 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16741 &mq_create_ext->u.request, 1);
16742 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16743 &mq_create_ext->u.request, 1);
16744 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16745 &mq_create_ext->u.request, 1);
16746 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16747 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16748 phba->sli4_hba.pc_sli4_params.mqv);
16749 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16750 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16751 cq->queue_id);
16752 else
16753 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16754 cq->queue_id);
16755 switch (mq->entry_count) {
16756 default:
16757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16758 "0362 Unsupported MQ count. (%d)\n",
16759 mq->entry_count);
16760 if (mq->entry_count < 16) {
16761 status = -EINVAL;
16762 goto out;
16763 }
16764 fallthrough; /* otherwise default to smallest count */
16765 case 16:
16766 bf_set(lpfc_mq_context_ring_size,
16767 &mq_create_ext->u.request.context,
16768 LPFC_MQ_RING_SIZE_16);
16769 break;
16770 case 32:
16771 bf_set(lpfc_mq_context_ring_size,
16772 &mq_create_ext->u.request.context,
16773 LPFC_MQ_RING_SIZE_32);
16774 break;
16775 case 64:
16776 bf_set(lpfc_mq_context_ring_size,
16777 &mq_create_ext->u.request.context,
16778 LPFC_MQ_RING_SIZE_64);
16779 break;
16780 case 128:
16781 bf_set(lpfc_mq_context_ring_size,
16782 &mq_create_ext->u.request.context,
16783 LPFC_MQ_RING_SIZE_128);
16784 break;
16785 }
16786 list_for_each_entry(dmabuf, &mq->page_list, list) {
16787 memset(dmabuf->virt, 0, hw_page_size);
16788 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16789 putPaddrLow(dmabuf->phys);
16790 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16791 putPaddrHigh(dmabuf->phys);
16792 }
16793 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16794 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16795 &mq_create_ext->u.response);
16796 if (rc != MBX_SUCCESS) {
16797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16798 "2795 MQ_CREATE_EXT failed with "
16799 "status x%x. Failback to MQ_CREATE.\n",
16800 rc);
16801 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16802 mq_create = &mbox->u.mqe.un.mq_create;
16803 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16804 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16805 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16806 &mq_create->u.response);
16807 }
16808
16809 /* The IOCTL status is embedded in the mailbox subheader. */
16810 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16811 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16812 if (shdr_status || shdr_add_status || rc) {
16813 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16814 "2502 MQ_CREATE mailbox failed with "
16815 "status x%x add_status x%x, mbx status x%x\n",
16816 shdr_status, shdr_add_status, rc);
16817 status = -ENXIO;
16818 goto out;
16819 }
16820 if (mq->queue_id == 0xFFFF) {
16821 status = -ENXIO;
16822 goto out;
16823 }
16824 mq->type = LPFC_MQ;
16825 mq->assoc_qid = cq->queue_id;
16826 mq->subtype = subtype;
16827 mq->host_index = 0;
16828 mq->hba_index = 0;
16829
16830 /* link the mq onto the parent cq child list */
16831 list_add_tail(&mq->list, &cq->child_list);
16832out:
16833 mempool_free(mbox, phba->mbox_mem_pool);
16834 return status;
16835}
16836
16837/**
16838 * lpfc_wq_create - Create a Work Queue on the HBA
16839 * @phba: HBA structure that indicates port to create a queue on.
16840 * @wq: The queue structure to use to create the work queue.
16841 * @cq: The completion queue to bind this work queue to.
16842 * @subtype: The subtype of the work queue indicating its functionality.
16843 *
16844 * This function creates a work queue, as detailed in @wq, on a port, described
16845 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16846 *
16847 * The @phba struct is used to send mailbox command to HBA. The @wq struct
16848 * is used to get the entry count and entry size that are necessary to
16849 * determine the number of pages to allocate and use for this queue. The @cq
16850 * is used to indicate which completion queue to bind this work queue to. This
16851 * function will send the WQ_CREATE mailbox command to the HBA to setup the
16852 * work queue. This function is synchronous and will wait for the mailbox
16853 * command to finish before continuing.
16854 *
16855 * On success this function will return a zero. If unable to allocate enough
16856 * memory this function will return -ENOMEM. If the queue create mailbox command
16857 * fails this function will return -ENXIO.
16858 **/
16859int
16860lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16861 struct lpfc_queue *cq, uint32_t subtype)
16862{
16863 struct lpfc_mbx_wq_create *wq_create;
16864 struct lpfc_dmabuf *dmabuf;
16865 LPFC_MBOXQ_t *mbox;
16866 int rc, length, status = 0;
16867 uint32_t shdr_status, shdr_add_status;
16868 union lpfc_sli4_cfg_shdr *shdr;
16869 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16870 struct dma_address *page;
16871 void __iomem *bar_memmap_p;
16872 uint32_t db_offset;
16873 uint16_t pci_barset;
16874 uint8_t dpp_barset;
16875 uint32_t dpp_offset;
16876 uint8_t wq_create_version;
16877#ifdef CONFIG_X86
16878 unsigned long pg_addr;
16879#endif
16880
16881 /* sanity check on queue memory */
16882 if (!wq || !cq)
16883 return -ENODEV;
16884 if (!phba->sli4_hba.pc_sli4_params.supported)
16885 hw_page_size = wq->page_size;
16886
16887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16888 if (!mbox)
16889 return -ENOMEM;
16890 length = (sizeof(struct lpfc_mbx_wq_create) -
16891 sizeof(struct lpfc_sli4_cfg_mhdr));
16892 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16893 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16894 length, LPFC_SLI4_MBX_EMBED);
16895 wq_create = &mbox->u.mqe.un.wq_create;
16896 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16897 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16898 wq->page_count);
16899 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16900 cq->queue_id);
16901
16902 /* wqv is the earliest version supported, NOT the latest */
16903 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16904 phba->sli4_hba.pc_sli4_params.wqv);
16905
16906 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16907 (wq->page_size > SLI4_PAGE_SIZE))
16908 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16909 else
16910 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16911
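/*
 * A V1 WQ_CREATE carries an explicit WQE count, WQE size and page size,
 * which the port needs for 128-byte WQEs or page sizes larger than
 * SLI4_PAGE_SIZE; a V0 request only carries the page count, and the
 * port infers the ring geometry from the pages posted below.
 */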
16912 switch (wq_create_version) {
16913 case LPFC_Q_CREATE_VERSION_1:
16914 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16915 wq->entry_count);
16916 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16917 LPFC_Q_CREATE_VERSION_1);
16918
16919 switch (wq->entry_size) {
16920 default:
16921 case 64:
16922 bf_set(lpfc_mbx_wq_create_wqe_size,
16923 &wq_create->u.request_1,
16924 LPFC_WQ_WQE_SIZE_64);
16925 break;
16926 case 128:
16927 bf_set(lpfc_mbx_wq_create_wqe_size,
16928 &wq_create->u.request_1,
16929 LPFC_WQ_WQE_SIZE_128);
16930 break;
16931 }
16932 /* Request DPP by default */
16933 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16934 bf_set(lpfc_mbx_wq_create_page_size,
16935 &wq_create->u.request_1,
16936 (wq->page_size / SLI4_PAGE_SIZE));
16937 page = wq_create->u.request_1.page;
16938 break;
16939 default:
16940 page = wq_create->u.request.page;
16941 break;
16942 }
16943
16944 list_for_each_entry(dmabuf, &wq->page_list, list) {
16945 memset(dmabuf->virt, 0, hw_page_size);
16946 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16947 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16948 }
16949
16950 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16951 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16952
16953 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16954 /* The IOCTL status is embedded in the mailbox subheader. */
16955 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16956 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16957 if (shdr_status || shdr_add_status || rc) {
16958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16959 "2503 WQ_CREATE mailbox failed with "
16960 "status x%x add_status x%x, mbx status x%x\n",
16961 shdr_status, shdr_add_status, rc);
16962 status = -ENXIO;
16963 goto out;
16964 }
16965
16966 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16967 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16968 &wq_create->u.response);
16969 else
16970 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16971 &wq_create->u.response_1);
16972
16973 if (wq->queue_id == 0xFFFF) {
16974 status = -ENXIO;
16975 goto out;
16976 }
16977
16978 wq->db_format = LPFC_DB_LIST_FORMAT;
16979 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16980 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16981 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16982 &wq_create->u.response);
16983 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16984 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16986 "3265 WQ[%d] doorbell format "
16987 "not supported: x%x\n",
16988 wq->queue_id, wq->db_format);
16989 status = -EINVAL;
16990 goto out;
16991 }
16992 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16993 &wq_create->u.response);
16994 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16995 pci_barset);
16996 if (!bar_memmap_p) {
16997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16998 "3263 WQ[%d] failed to memmap "
16999 "pci barset:x%x\n",
17000 wq->queue_id, pci_barset);
17001 status = -ENOMEM;
17002 goto out;
17003 }
17004 db_offset = wq_create->u.response.doorbell_offset;
17005 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17006 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17008 "3252 WQ[%d] doorbell offset "
17009 "not supported: x%x\n",
17010 wq->queue_id, db_offset);
17011 status = -EINVAL;
17012 goto out;
17013 }
17014 wq->db_regaddr = bar_memmap_p + db_offset;
17015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17016 "3264 WQ[%d]: barset:x%x, offset:x%x, "
17017 "format:x%x\n", wq->queue_id,
17018 pci_barset, db_offset, wq->db_format);
17019 } else
17020 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17021 } else {
17022 /* Check if DPP was honored by the firmware */
17023 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17024 &wq_create->u.response_1);
17025 if (wq->dpp_enable) {
17026 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17027 &wq_create->u.response_1);
17028 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17029 pci_barset);
17030 if (!bar_memmap_p) {
17031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17032 "3267 WQ[%d] failed to memmap "
17033 "pci barset:x%x\n",
17034 wq->queue_id, pci_barset);
17035 status = -ENOMEM;
17036 goto out;
17037 }
17038 db_offset = wq_create->u.response_1.doorbell_offset;
17039 wq->db_regaddr = bar_memmap_p + db_offset;
17040 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17041 &wq_create->u.response_1);
17042 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17043 &wq_create->u.response_1);
17044 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17045 dpp_barset);
17046 if (!bar_memmap_p) {
17047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17048 "3268 WQ[%d] failed to memmap "
17049 "pci barset:x%x\n",
17050 wq->queue_id, dpp_barset);
17051 status = -ENOMEM;
17052 goto out;
17053 }
17054 dpp_offset = wq_create->u.response_1.dpp_offset;
17055 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17056 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17057 "3271 WQ[%d]: barset:x%x, offset:x%x, "
17058 "dpp_id:x%x dpp_barset:x%x "
17059 "dpp_offset:x%x\n",
17060 wq->queue_id, pci_barset, db_offset,
17061 wq->dpp_id, dpp_barset, dpp_offset);
17062
17063#ifdef CONFIG_X86
17064 /* Enable combined writes for DPP aperture */
17065 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
17066 rc = set_memory_wc(pg_addr, 1);
17067 if (rc) {
17068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17069 "3272 Cannot setup Combined "
17070 "Write on WQ[%d] - disable DPP\n",
17071 wq->queue_id);
17072 phba->cfg_enable_dpp = 0;
17073 }
17074#else
17075 phba->cfg_enable_dpp = 0;
17076#endif
17077 } else
17078 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17079 }
17080 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17081 if (wq->pring == NULL) {
17082 status = -ENOMEM;
17083 goto out;
17084 }
17085 wq->type = LPFC_WQ;
17086 wq->assoc_qid = cq->queue_id;
17087 wq->subtype = subtype;
17088 wq->host_index = 0;
17089 wq->hba_index = 0;
17090 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17091
17092 /* link the wq onto the parent cq child list */
17093 list_add_tail(&wq->list, &cq->child_list);
17094out:
17095 mempool_free(mbox, phba->mbox_mem_pool);
17096 return status;
17097}
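/*
 * Illustrative creation order (a sketch, not a driver path): a work
 * queue is always bound to an existing completion queue, so the CQ is
 * created first and the WQ after it, roughly:
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
 *	if (!rc)
 *		rc = lpfc_wq_create(phba, wq, cq, LPFC_IO);
 *
 * The lpfc_cq_create() arguments are assumed for illustration; see the
 * driver's queue setup code for the actual call sites and cleanup.
 */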
17098
17099/**
17100 * lpfc_rq_create - Create a Receive Queue on the HBA
17101 * @phba: HBA structure that indicates port to create a queue on.
17102 * @hrq: The queue structure to use to create the header receive queue.
17103 * @drq: The queue structure to use to create the data receive queue.
17104 * @cq: The completion queue to bind this receive queue pair to.
17105 * @subtype: The subtype of the receive queue indicating its functionality.
17106 *
17107 * This function creates a receive buffer queue pair, as detailed in @hrq and
17108 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
17109 * to the HBA.
17110 *
17111 * The @phba struct is used to send the mailbox command to the HBA. The @drq
17112 * and @hrq structs are used to get the entry counts that are necessary to
17113 * determine the number of pages to use for each queue. The @cq is used to
17114 * indicate which completion queue the received buffers posted to these queues
17115 * are bound to. This function will send the RQ_CREATE mailbox command to the
17116 * HBA to set up the receive queue pair. This function is synchronous and will
17117 * wait for the mailbox command to finish before continuing.
17118 *
17119 * On success this function will return a zero. If unable to allocate enough
17120 * memory this function will return -ENOMEM. If the queue create mailbox command
17121 * fails this function will return -ENXIO.
17122 **/
17123int
17124lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17125 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17126{
17127 struct lpfc_mbx_rq_create *rq_create;
17128 struct lpfc_dmabuf *dmabuf;
17129 LPFC_MBOXQ_t *mbox;
17130 int rc, length, status = 0;
17131 uint32_t shdr_status, shdr_add_status;
17132 union lpfc_sli4_cfg_shdr *shdr;
17133 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17134 void __iomem *bar_memmap_p;
17135 uint32_t db_offset;
17136 uint16_t pci_barset;
17137
17138 /* sanity check on queue memory */
17139 if (!hrq || !drq || !cq)
17140 return -ENODEV;
17141 if (!phba->sli4_hba.pc_sli4_params.supported)
17142 hw_page_size = SLI4_PAGE_SIZE;
17143
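/*
 * The header and data RQs advance in lockstep: each received frame
 * consumes one header RQE and one data RQE, so the two rings must be
 * the same depth.
 */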
17144 if (hrq->entry_count != drq->entry_count)
17145 return -EINVAL;
17146 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17147 if (!mbox)
17148 return -ENOMEM;
17149 length = (sizeof(struct lpfc_mbx_rq_create) -
17150 sizeof(struct lpfc_sli4_cfg_mhdr));
17151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17152 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17153 length, LPFC_SLI4_MBX_EMBED);
17154 rq_create = &mbox->u.mqe.un.rq_create;
17155 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17156 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17157 phba->sli4_hba.pc_sli4_params.rqv);
17158 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17159 bf_set(lpfc_rq_context_rqe_count_1,
17160 &rq_create->u.request.context,
17161 hrq->entry_count);
17162 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17163 bf_set(lpfc_rq_context_rqe_size,
17164 &rq_create->u.request.context,
17165 LPFC_RQE_SIZE_8);
17166 bf_set(lpfc_rq_context_page_size,
17167 &rq_create->u.request.context,
17168 LPFC_RQ_PAGE_SIZE_4096);
17169 } else {
17170 switch (hrq->entry_count) {
17171 default:
17172 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17173 "2535 Unsupported RQ count. (%d)\n",
17174 hrq->entry_count);
17175 if (hrq->entry_count < 512) {
17176 status = -EINVAL;
17177 goto out;
17178 }
17179 fallthrough; /* otherwise default to smallest count */
17180 case 512:
17181 bf_set(lpfc_rq_context_rqe_count,
17182 &rq_create->u.request.context,
17183 LPFC_RQ_RING_SIZE_512);
17184 break;
17185 case 1024:
17186 bf_set(lpfc_rq_context_rqe_count,
17187 &rq_create->u.request.context,
17188 LPFC_RQ_RING_SIZE_1024);
17189 break;
17190 case 2048:
17191 bf_set(lpfc_rq_context_rqe_count,
17192 &rq_create->u.request.context,
17193 LPFC_RQ_RING_SIZE_2048);
17194 break;
17195 case 4096:
17196 bf_set(lpfc_rq_context_rqe_count,
17197 &rq_create->u.request.context,
17198 LPFC_RQ_RING_SIZE_4096);
17199 break;
17200 }
17201 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17202 LPFC_HDR_BUF_SIZE);
17203 }
17204 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17205 cq->queue_id);
17206 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17207 hrq->page_count);
17208 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17209 memset(dmabuf->virt, 0, hw_page_size);
17210 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17211 putPaddrLow(dmabuf->phys);
17212 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17213 putPaddrHigh(dmabuf->phys);
17214 }
17215 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17216 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17217
17218 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17219 /* The IOCTL status is embedded in the mailbox subheader. */
17220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17222 if (shdr_status || shdr_add_status || rc) {
17223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17224 "2504 RQ_CREATE mailbox failed with "
17225 "status x%x add_status x%x, mbx status x%x\n",
17226 shdr_status, shdr_add_status, rc);
17227 status = -ENXIO;
17228 goto out;
17229 }
17230 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17231 if (hrq->queue_id == 0xFFFF) {
17232 status = -ENXIO;
17233 goto out;
17234 }
17235
17236 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17237 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17238 &rq_create->u.response);
17239 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17240 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17241 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17242 "3262 RQ [%d] doorbell format not "
17243 "supported: x%x\n", hrq->queue_id,
17244 hrq->db_format);
17245 status = -EINVAL;
17246 goto out;
17247 }
17248
17249 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17250 &rq_create->u.response);
17251 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17252 if (!bar_memmap_p) {
17253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17254 "3269 RQ[%d] failed to memmap pci "
17255 "barset:x%x\n", hrq->queue_id,
17256 pci_barset);
17257 status = -ENOMEM;
17258 goto out;
17259 }
17260
17261 db_offset = rq_create->u.response.doorbell_offset;
17262 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17263 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17265 "3270 RQ[%d] doorbell offset not "
17266 "supported: x%x\n", hrq->queue_id,
17267 db_offset);
17268 status = -EINVAL;
17269 goto out;
17270 }
17271 hrq->db_regaddr = bar_memmap_p + db_offset;
17272 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17273 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17274 "format:x%x\n", hrq->queue_id, pci_barset,
17275 db_offset, hrq->db_format);
17276 } else {
17277 hrq->db_format = LPFC_DB_RING_FORMAT;
17278 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17279 }
17280 hrq->type = LPFC_HRQ;
17281 hrq->assoc_qid = cq->queue_id;
17282 hrq->subtype = subtype;
17283 hrq->host_index = 0;
17284 hrq->hba_index = 0;
17285 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17286
17287 /* now create the data queue */
17288 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17289 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17290 length, LPFC_SLI4_MBX_EMBED);
17291 bf_set(lpfc_mbox_hdr_version, &shdr->request,
17292 phba->sli4_hba.pc_sli4_params.rqv);
17293 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17294 bf_set(lpfc_rq_context_rqe_count_1,
17295 &rq_create->u.request.context, hrq->entry_count);
17296 if (subtype == LPFC_NVMET)
17297 rq_create->u.request.context.buffer_size =
17298 LPFC_NVMET_DATA_BUF_SIZE;
17299 else
17300 rq_create->u.request.context.buffer_size =
17301 LPFC_DATA_BUF_SIZE;
17302 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17303 LPFC_RQE_SIZE_8);
17304 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17305 (PAGE_SIZE/SLI4_PAGE_SIZE));
17306 } else {
17307 switch (drq->entry_count) {
17308 default:
17309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17310 "2536 Unsupported RQ count. (%d)\n",
17311 drq->entry_count);
17312 if (drq->entry_count < 512) {
17313 status = -EINVAL;
17314 goto out;
17315 }
17316 fallthrough; /* otherwise default to smallest count */
17317 case 512:
17318 bf_set(lpfc_rq_context_rqe_count,
17319 &rq_create->u.request.context,
17320 LPFC_RQ_RING_SIZE_512);
17321 break;
17322 case 1024:
17323 bf_set(lpfc_rq_context_rqe_count,
17324 &rq_create->u.request.context,
17325 LPFC_RQ_RING_SIZE_1024);
17326 break;
17327 case 2048:
17328 bf_set(lpfc_rq_context_rqe_count,
17329 &rq_create->u.request.context,
17330 LPFC_RQ_RING_SIZE_2048);
17331 break;
17332 case 4096:
17333 bf_set(lpfc_rq_context_rqe_count,
17334 &rq_create->u.request.context,
17335 LPFC_RQ_RING_SIZE_4096);
17336 break;
17337 }
17338 if (subtype == LPFC_NVMET)
17339 bf_set(lpfc_rq_context_buf_size,
17340 &rq_create->u.request.context,
17341 LPFC_NVMET_DATA_BUF_SIZE);
17342 else
17343 bf_set(lpfc_rq_context_buf_size,
17344 &rq_create->u.request.context,
17345 LPFC_DATA_BUF_SIZE);
17346 }
17347 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17348 cq->queue_id);
17349 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17350 drq->page_count);
17351 list_for_each_entry(dmabuf, &drq->page_list, list) {
17352 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17353 putPaddrLow(dmabuf->phys);
17354 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17355 putPaddrHigh(dmabuf->phys);
17356 }
17357 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17358 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17359 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17360 /* The IOCTL status is embedded in the mailbox subheader. */
17361 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17362 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17363 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17364 if (shdr_status || shdr_add_status || rc) {
17365 status = -ENXIO;
17366 goto out;
17367 }
17368 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17369 if (drq->queue_id == 0xFFFF) {
17370 status = -ENXIO;
17371 goto out;
17372 }
17373 drq->type = LPFC_DRQ;
17374 drq->assoc_qid = cq->queue_id;
17375 drq->subtype = subtype;
17376 drq->host_index = 0;
17377 drq->hba_index = 0;
17378 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17379
17380 /* link the header and data RQs onto the parent cq child list */
17381 list_add_tail(&hrq->list, &cq->child_list);
17382 list_add_tail(&drq->list, &cq->child_list);
17383
17384out:
17385 mempool_free(mbox, phba->mbox_mem_pool);
17386 return status;
17387}
17388
17389/**
17390 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17391 * @phba: HBA structure that indicates port to create a queue on.
17392 * @hrqp: The queue structure array to use to create the header receive queues.
17393 * @drqp: The queue structure array to use to create the data receive queues.
17394 * @cqp: The completion queue array to bind these receive queues to.
17395 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17396 *
17397 * This function creates the receive buffer queue pairs, as detailed in @hrqp
17398 * and @drqp, on a port, described by @phba, by sending a RQ_CREATE mailbox
17399 * command to the HBA.
17400 *
17401 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17402 * and @hrqp structs are used to get the entry counts needed to determine the
17403 * number of pages to use for each queue. Each @cqp entry indicates which
17404 * completion queue the buffers received on the corresponding queue pair are
17405 * bound to. This function will send the RQ_CREATE mailbox command to the HBA
17406 * to set up the receive queue pairs. This function is synchronous and will
17407 * wait for the mailbox command to finish before continuing.
17408 *
17409 * On success this function will return a zero. If unable to allocate enough
17410 * memory this function will return -ENOMEM. If the queue create mailbox command
17411 * fails this function will return -ENXIO.
17412 **/
17413int
17414lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17415 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17416 uint32_t subtype)
17417{
17418 struct lpfc_queue *hrq, *drq, *cq;
17419 struct lpfc_mbx_rq_create_v2 *rq_create;
17420 struct lpfc_dmabuf *dmabuf;
17421 LPFC_MBOXQ_t *mbox;
17422 int rc, length, alloclen, status = 0;
17423 int cnt, idx, numrq, page_idx = 0;
17424 uint32_t shdr_status, shdr_add_status;
17425 union lpfc_sli4_cfg_shdr *shdr;
17426 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17427
17428 numrq = phba->cfg_nvmet_mrq;
17429 /* sanity check on array memory */
17430 if (!hrqp || !drqp || !cqp || !numrq)
17431 return -ENODEV;
17432 if (!phba->sli4_hba.pc_sli4_params.supported)
17433 hw_page_size = SLI4_PAGE_SIZE;
17434
17435 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17436 if (!mbox)
17437 return -ENOMEM;
17438
17439 length = sizeof(struct lpfc_mbx_rq_create_v2);
17440 length += ((2 * numrq * hrqp[0]->page_count) *
17441 sizeof(struct dma_address));
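/*
 * Each of the numrq MRQs is a header/data RQ pair and every RQ posts
 * page_count page addresses, hence 2 * numrq * page_count dma_address
 * entries on top of the base V2 command.
 */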
17442
17443 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17444 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17445 LPFC_SLI4_MBX_NEMBED);
17446 if (alloclen < length) {
17447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17448 "3099 Allocated DMA memory size (%d) is "
17449 "less than the requested DMA memory size "
17450 "(%d)\n", alloclen, length);
17451 status = -ENOMEM;
17452 goto out;
17453 }
17454
17457 rq_create = mbox->sge_array->addr[0];
17458 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17459
17460 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17461 cnt = 0;
17462
17463 for (idx = 0; idx < numrq; idx++) {
17464 hrq = hrqp[idx];
17465 drq = drqp[idx];
17466 cq = cqp[idx];
17467
17468 /* sanity check on queue memory */
17469 if (!hrq || !drq || !cq) {
17470 status = -ENODEV;
17471 goto out;
17472 }
17473
17474 if (hrq->entry_count != drq->entry_count) {
17475 status = -EINVAL;
17476 goto out;
17477 }
17478
17479 if (idx == 0) {
17480 bf_set(lpfc_mbx_rq_create_num_pages,
17481 &rq_create->u.request,
17482 hrq->page_count);
17483 bf_set(lpfc_mbx_rq_create_rq_cnt,
17484 &rq_create->u.request, (numrq * 2));
17485 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17486 1);
17487 bf_set(lpfc_rq_context_base_cq,
17488 &rq_create->u.request.context,
17489 cq->queue_id);
17490 bf_set(lpfc_rq_context_data_size,
17491 &rq_create->u.request.context,
17492 LPFC_NVMET_DATA_BUF_SIZE);
17493 bf_set(lpfc_rq_context_hdr_size,
17494 &rq_create->u.request.context,
17495 LPFC_HDR_BUF_SIZE);
17496 bf_set(lpfc_rq_context_rqe_count_1,
17497 &rq_create->u.request.context,
17498 hrq->entry_count);
17499 bf_set(lpfc_rq_context_rqe_size,
17500 &rq_create->u.request.context,
17501 LPFC_RQE_SIZE_8);
17502 bf_set(lpfc_rq_context_page_size,
17503 &rq_create->u.request.context,
17504 (PAGE_SIZE/SLI4_PAGE_SIZE));
17505 }
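/*
 * The context is programmed once, from the first pair; the port
 * applies the same buffer sizes and ring geometry to every RQ pair
 * created by this command.
 */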
17506 rc = 0;
17507 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17508 memset(dmabuf->virt, 0, hw_page_size);
17509 cnt = page_idx + dmabuf->buffer_tag;
17510 rq_create->u.request.page[cnt].addr_lo =
17511 putPaddrLow(dmabuf->phys);
17512 rq_create->u.request.page[cnt].addr_hi =
17513 putPaddrHigh(dmabuf->phys);
17514 rc++;
17515 }
17516 page_idx += rc;
17517
17518 rc = 0;
17519 list_for_each_entry(dmabuf, &drq->page_list, list) {
17520 memset(dmabuf->virt, 0, hw_page_size);
17521 cnt = page_idx + dmabuf->buffer_tag;
17522 rq_create->u.request.page[cnt].addr_lo =
17523 putPaddrLow(dmabuf->phys);
17524 rq_create->u.request.page[cnt].addr_hi =
17525 putPaddrHigh(dmabuf->phys);
17526 rc++;
17527 }
17528 page_idx += rc;
17529
17530 hrq->db_format = LPFC_DB_RING_FORMAT;
17531 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17532 hrq->type = LPFC_HRQ;
17533 hrq->assoc_qid = cq->queue_id;
17534 hrq->subtype = subtype;
17535 hrq->host_index = 0;
17536 hrq->hba_index = 0;
17537 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17538
17539 drq->db_format = LPFC_DB_RING_FORMAT;
17540 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17541 drq->type = LPFC_DRQ;
17542 drq->assoc_qid = cq->queue_id;
17543 drq->subtype = subtype;
17544 drq->host_index = 0;
17545 drq->hba_index = 0;
17546 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17547
17548 list_add_tail(&hrq->list, &cq->child_list);
17549 list_add_tail(&drq->list, &cq->child_list);
17550 }
17551
17552 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17553 /* The IOCTL status is embedded in the mailbox subheader. */
17554 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17555 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17556 if (shdr_status || shdr_add_status || rc) {
17557 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17558 "3120 RQ_CREATE mailbox failed with "
17559 "status x%x add_status x%x, mbx status x%x\n",
17560 shdr_status, shdr_add_status, rc);
17561 status = -ENXIO;
17562 goto out;
17563 }
17564 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17565 if (rc == 0xFFFF) {
17566 status = -ENXIO;
17567 goto out;
17568 }
17569
17570 /* Initialize all RQs with associated queue id */
17571 for (idx = 0; idx < numrq; idx++) {
17572 hrq = hrqp[idx];
17573 hrq->queue_id = rc + (2 * idx);
17574 drq = drqp[idx];
17575 drq->queue_id = rc + (2 * idx) + 1;
17576 }
17577
17578out:
17579 lpfc_sli4_mbox_cmd_free(phba, mbox);
17580 return status;
17581}
17582
17583/**
17584 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17585 * @phba: HBA structure that indicates port to destroy a queue on.
17586 * @eq: The queue structure associated with the queue to destroy.
17587 *
17588 * This function destroys a queue, as detailed in @eq, by sending a mailbox
17589 * command, specific to the type of queue, to the HBA.
17590 *
17591 * The @eq struct is used to get the queue ID of the queue to destroy.
17592 *
17593 * On success this function will return a zero. If the queue destroy mailbox
17594 * command fails this function will return -ENXIO.
17595 **/
17596int
17597lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17598{
17599 LPFC_MBOXQ_t *mbox;
17600 int rc, length, status = 0;
17601 uint32_t shdr_status, shdr_add_status;
17602 union lpfc_sli4_cfg_shdr *shdr;
17603
17604 /* sanity check on queue memory */
17605 if (!eq)
17606 return -ENODEV;
17607
17608 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17609 if (!mbox)
17610 return -ENOMEM;
17611 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17612 sizeof(struct lpfc_sli4_cfg_mhdr));
17613 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17614 LPFC_MBOX_OPCODE_EQ_DESTROY,
17615 length, LPFC_SLI4_MBX_EMBED);
17616 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17617 eq->queue_id);
17618 mbox->vport = eq->phba->pport;
17619 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17620
17621 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17622 /* The IOCTL status is embedded in the mailbox subheader. */
17623 shdr = (union lpfc_sli4_cfg_shdr *)
17624 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17625 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17626 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17627 if (shdr_status || shdr_add_status || rc) {
17628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17629 "2505 EQ_DESTROY mailbox failed with "
17630 "status x%x add_status x%x, mbx status x%x\n",
17631 shdr_status, shdr_add_status, rc);
17632 status = -ENXIO;
17633 }
17634
17635 /* Remove eq from any list */
17636 list_del_init(&eq->list);
17637 mempool_free(mbox, eq->phba->mbox_mem_pool);
17638 return status;
17639}
17640
17641/**
17642 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17643 * @phba: HBA structure that indicates port to destroy a queue on.
17644 * @cq: The queue structure associated with the queue to destroy.
17645 *
17646 * This function destroys a queue, as detailed in @cq, by sending a mailbox
17647 * command, specific to the type of queue, to the HBA.
17648 *
17649 * The @cq struct is used to get the queue ID of the queue to destroy.
17650 *
17651 * On success this function will return a zero. If the queue destroy mailbox
17652 * command fails this function will return -ENXIO.
17653 **/
17654int
17655lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17656{
17657 LPFC_MBOXQ_t *mbox;
17658 int rc, length, status = 0;
17659 uint32_t shdr_status, shdr_add_status;
17660 union lpfc_sli4_cfg_shdr *shdr;
17661
17662 /* sanity check on queue memory */
17663 if (!cq)
17664 return -ENODEV;
17665 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17666 if (!mbox)
17667 return -ENOMEM;
17668 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17669 sizeof(struct lpfc_sli4_cfg_mhdr));
17670 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17671 LPFC_MBOX_OPCODE_CQ_DESTROY,
17672 length, LPFC_SLI4_MBX_EMBED);
17673 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17674 cq->queue_id);
17675 mbox->vport = cq->phba->pport;
17676 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17677 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17678 /* The IOCTL status is embedded in the mailbox subheader. */
17679 shdr = (union lpfc_sli4_cfg_shdr *)
17680 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
17681 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17682 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17683 if (shdr_status || shdr_add_status || rc) {
17684 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17685 "2506 CQ_DESTROY mailbox failed with "
17686 "status x%x add_status x%x, mbx status x%x\n",
17687 shdr_status, shdr_add_status, rc);
17688 status = -ENXIO;
17689 }
17690 /* Remove cq from any list */
17691 list_del_init(&cq->list);
17692 mempool_free(mbox, cq->phba->mbox_mem_pool);
17693 return status;
17694}
17695
17696/**
17697 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17698 * @phba: HBA structure that indicates port to destroy a queue on.
17699 * @mq: The queue structure associated with the queue to destroy.
17700 *
17701 * This function destroys a queue, as detailed in @mq, by sending a mailbox
17702 * command, specific to the type of queue, to the HBA.
17703 *
17704 * The @mq struct is used to get the queue ID of the queue to destroy.
17705 *
17706 * On success this function will return a zero. If the queue destroy mailbox
17707 * command fails this function will return -ENXIO.
17708 **/
17709int
17710lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17711{
17712 LPFC_MBOXQ_t *mbox;
17713 int rc, length, status = 0;
17714 uint32_t shdr_status, shdr_add_status;
17715 union lpfc_sli4_cfg_shdr *shdr;
17716
17717 /* sanity check on queue memory */
17718 if (!mq)
17719 return -ENODEV;
17720 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17721 if (!mbox)
17722 return -ENOMEM;
17723 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17724 sizeof(struct lpfc_sli4_cfg_mhdr));
17725 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17726 LPFC_MBOX_OPCODE_MQ_DESTROY,
17727 length, LPFC_SLI4_MBX_EMBED);
17728 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17729 mq->queue_id);
17730 mbox->vport = mq->phba->pport;
17731 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17732 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17733 /* The IOCTL status is embedded in the mailbox subheader. */
17734 shdr = (union lpfc_sli4_cfg_shdr *)
17735 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17736 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17737 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17738 if (shdr_status || shdr_add_status || rc) {
17739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17740 "2507 MQ_DESTROY mailbox failed with "
17741 "status x%x add_status x%x, mbx status x%x\n",
17742 shdr_status, shdr_add_status, rc);
17743 status = -ENXIO;
17744 }
17745 /* Remove mq from any list */
17746 list_del_init(&mq->list);
17747 mempool_free(mbox, mq->phba->mbox_mem_pool);
17748 return status;
17749}
17750
17751/**
17752 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17753 * @phba: HBA structure that indicates port to destroy a queue on.
17754 * @wq: The queue structure associated with the queue to destroy.
17755 *
17756 * This function destroys a queue, as detailed in @wq, by sending a mailbox
17757 * command, specific to the type of queue, to the HBA.
17758 *
17759 * The @wq struct is used to get the queue ID of the queue to destroy.
17760 *
17761 * On success this function will return a zero. If the queue destroy mailbox
17762 * command fails this function will return -ENXIO.
17763 **/
17764int
17765lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17766{
17767 LPFC_MBOXQ_t *mbox;
17768 int rc, length, status = 0;
17769 uint32_t shdr_status, shdr_add_status;
17770 union lpfc_sli4_cfg_shdr *shdr;
17771
17772 /* sanity check on queue memory */
17773 if (!wq)
17774 return -ENODEV;
17775 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17776 if (!mbox)
17777 return -ENOMEM;
17778 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17779 sizeof(struct lpfc_sli4_cfg_mhdr));
17780 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17781 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17782 length, LPFC_SLI4_MBX_EMBED);
17783 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17784 wq->queue_id);
17785 mbox->vport = wq->phba->pport;
17786 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17787 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17788 shdr = (union lpfc_sli4_cfg_shdr *)
17789 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17790 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17791 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17792 if (shdr_status || shdr_add_status || rc) {
17793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17794 "2508 WQ_DESTROY mailbox failed with "
17795 "status x%x add_status x%x, mbx status x%x\n",
17796 shdr_status, shdr_add_status, rc);
17797 status = -ENXIO;
17798 }
17799 /* Remove wq from any list */
17800 list_del_init(&wq->list);
17801 kfree(wq->pring);
17802 wq->pring = NULL;
17803 mempool_free(mbox, wq->phba->mbox_mem_pool);
17804 return status;
17805}
17806
17807/**
17808 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17809 * @phba: HBA structure that indicates port to destroy a queue on.
17810 * @hrq: The header receive queue structure to destroy.
17811 * @drq: The data receive queue structure to destroy.
17812 *
17813 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
17814 * by sending a mailbox command, specific to the type of queue, to the HBA.
17815 *
17816 * The @hrq and @drq structs are used to get the queue IDs to destroy.
17817 *
17818 * On success this function will return a zero. If the queue destroy mailbox
17819 * command fails this function will return -ENXIO.
17820 **/
17821int
17822lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17823 struct lpfc_queue *drq)
17824{
17825 LPFC_MBOXQ_t *mbox;
17826 int rc, length, status = 0;
17827 uint32_t shdr_status, shdr_add_status;
17828 union lpfc_sli4_cfg_shdr *shdr;
17829
17830 /* sanity check on queue memory */
17831 if (!hrq || !drq)
17832 return -ENODEV;
17833 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17834 if (!mbox)
17835 return -ENOMEM;
17836 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17837 sizeof(struct lpfc_sli4_cfg_mhdr));
17838 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17839 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17840 length, LPFC_SLI4_MBX_EMBED);
17841 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17842 hrq->queue_id);
17843 mbox->vport = hrq->phba->pport;
17844 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17845 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17846 /* The IOCTL status is embedded in the mailbox subheader. */
17847 shdr = (union lpfc_sli4_cfg_shdr *)
17848 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17849 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17850 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17851 if (shdr_status || shdr_add_status || rc) {
17852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17853 "2509 RQ_DESTROY mailbox failed with "
17854 "status x%x add_status x%x, mbx status x%x\n",
17855 shdr_status, shdr_add_status, rc);
17856 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17857 return -ENXIO;
17858 }
17859 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17860 drq->queue_id);
17861 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17862 shdr = (union lpfc_sli4_cfg_shdr *)
17863 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17864 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17865 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17866 if (shdr_status || shdr_add_status || rc) {
17867 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17868 "2510 RQ_DESTROY mailbox failed with "
17869 "status x%x add_status x%x, mbx status x%x\n",
17870 shdr_status, shdr_add_status, rc);
17871 status = -ENXIO;
17872 }
17873 list_del_init(&hrq->list);
17874 list_del_init(&drq->list);
17875 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17876 return status;
17877}
17878
17879/**
17880 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17881 * @phba: pointer to lpfc hba data structure.
17882 * @pdma_phys_addr0: Physical address of the 1st SGL page.
17883 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17884 * @xritag: the xritag that ties this io to the SGL pages.
17885 *
17886 * This routine will post the sgl pages for the IO that has the xritag
17887 * that is in the iocbq structure. The xritag is assigned during iocbq
17888 * creation and persists for as long as the driver is loaded.
17889 * If the caller has fewer than 256 scatter gather segments to map then
17890 * pdma_phys_addr1 should be 0.
17891 * If the caller needs to map more than 256 scatter gather segments then
17892 * pdma_phys_addr1 should be a valid physical address.
17893 * Physical addresses for SGLs must be 64-byte aligned.
17894 * If two SGLs are mapped, the first one must have 256 entries and the
17895 * second can have between 1 and 256 entries.
17896 *
17897 * Return codes:
17898 * 0 - Success
17899 * -ENXIO, -ENOMEM, -EINVAL - Failure
17900 **/
17901int
17902lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17903 dma_addr_t pdma_phys_addr0,
17904 dma_addr_t pdma_phys_addr1,
17905 uint16_t xritag)
17906{
17907 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17908 LPFC_MBOXQ_t *mbox;
17909 int rc;
17910 uint32_t shdr_status, shdr_add_status;
17911 uint32_t mbox_tmo;
17912 union lpfc_sli4_cfg_shdr *shdr;
17913
17914 if (xritag == NO_XRI) {
17915 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17916 "0364 Invalid param:\n");
17917 return -EINVAL;
17918 }
17919
17920 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17921 if (!mbox)
17922 return -ENOMEM;
17923
17924 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17925 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17926 sizeof(struct lpfc_mbx_post_sgl_pages) -
17927 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17928
17929 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17930 &mbox->u.mqe.un.post_sgl_pages;
17931 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17932 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17933
17934 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17935 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17936 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17937 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17938
17939 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17940 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17941 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17942 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
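/*
 * Poll the mailbox when interrupts are not yet enabled (e.g. during
 * early initialization); otherwise sleep-wait with the command-specific
 * timeout.
 */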
17943 if (!phba->sli4_hba.intr_enable)
17944 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17945 else {
17946 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17947 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17948 }
17949 /* The IOCTL status is embedded in the mailbox subheader. */
17950 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17951 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17952 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17953 if (!phba->sli4_hba.intr_enable)
17954 mempool_free(mbox, phba->mbox_mem_pool);
17955 else if (rc != MBX_TIMEOUT)
17956 mempool_free(mbox, phba->mbox_mem_pool);
17957 if (shdr_status || shdr_add_status || rc) {
17958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17959 "2511 POST_SGL mailbox failed with "
17960 "status x%x add_status x%x, mbx status x%x\n",
17961 shdr_status, shdr_add_status, rc);
return -ENXIO;
17962 }
17963 return 0;
17964}
17965
17966/**
17967 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17968 * @phba: pointer to lpfc hba data structure.
17969 *
17970 * This routine is invoked to allocate the next available xri from the
17971 * driver's xri bitmask, consistent with the SLI-4 interface spec.
17972 * Because the index is logical, the search always starts at 0 and the
17973 * first clear bit is taken.
17974 *
17975 * Returns
17976 * An available xri in the range 0 <= xri < max_xri if successful,
17977 * NO_XRI if no xris are available.
17978 **/
17979static uint16_t
17980lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17981{
17982 unsigned long xri;
17983
17984 /*
17985 * Fetch the next logical xri. Because this index is logical,
17986 * the driver starts at 0 each time.
17987 */
17988 spin_lock_irq(&phba->hbalock);
17989 xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17990 phba->sli4_hba.max_cfg_param.max_xri);
17991 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17992 spin_unlock_irq(&phba->hbalock);
17993 return NO_XRI;
17994 } else {
17995 set_bit(xri, phba->sli4_hba.xri_bmask);
17996 phba->sli4_hba.max_cfg_param.xri_used++;
17997 }
17998 spin_unlock_irq(&phba->hbalock);
17999 return xri;
18000}
18001
18002/**
18003 * __lpfc_sli4_free_xri - Release an xri for reuse.
18004 * @phba: pointer to lpfc hba data structure.
18005 * @xri: xri to release.
18006 *
18007 * This routine is invoked to release an xri to the pool of
18008 * available xris maintained by the driver.
18009 **/
18010static void
18011__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18012{
18013 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
18014 phba->sli4_hba.max_cfg_param.xri_used--;
18015 }
18016}
18017
18018/**
18019 * lpfc_sli4_free_xri - Release an xri for reuse.
18020 * @phba: pointer to lpfc hba data structure.
18021 * @xri: xri to release.
18022 *
18023 * This routine is invoked to release an xri to the pool of
18024 * available xris maintained by the driver.
18025 **/
18026void
18027lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18028{
18029 spin_lock_irq(&phba->hbalock);
18030 __lpfc_sli4_free_xri(phba, xri);
18031 spin_unlock_irq(&phba->hbalock);
18032}
18033
18034/**
18035 * lpfc_sli4_next_xritag - Get an xritag for the io
18036 * @phba: Pointer to HBA context object.
18037 *
18038 * This function gets an xritag for the iocb. If there is no unused xritag
18039 * it will return NO_XRI (0xffff).
18040 * The function returns the allocated xritag if successful, else returns
18041 * NO_XRI; NO_XRI is not a valid xritag.
18042 * The caller is not required to hold any lock.
18043 **/
18044uint16_t
18045lpfc_sli4_next_xritag(struct lpfc_hba *phba)
18046{
18047 uint16_t xri_index;
18048
18049 xri_index = lpfc_sli4_alloc_xri(phba);
18050 if (xri_index == NO_XRI)
18051 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18052 "2004 Failed to allocate XRI.last XRITAG is %d"
18053 " Max XRI is %d, Used XRI is %d\n",
18054 xri_index,
18055 phba->sli4_hba.max_cfg_param.max_xri,
18056 phba->sli4_hba.max_cfg_param.xri_used);
18057 return xri_index;
18058}
18059
18060/**
18061 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
18062 * @phba: pointer to lpfc hba data structure.
18063 * @post_sgl_list: pointer to els sgl entry list.
18064 * @post_cnt: number of els sgl entries on the list.
18065 *
18066 * This routine is invoked to post a block of the driver's sgl pages to the
18067 * HBA using a non-embedded mailbox command. No lock is held. This routine
18068 * is only called when the driver is loading and after all IO has been
18069 * stopped.
18070 **/
18071static int
18072lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
18073 struct list_head *post_sgl_list,
18074 int post_cnt)
18075{
18076 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18077 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18078 struct sgl_page_pairs *sgl_pg_pairs;
18079 void *viraddr;
18080 LPFC_MBOXQ_t *mbox;
18081 uint32_t reqlen, alloclen, pg_pairs;
18082 uint32_t mbox_tmo;
18083 uint16_t xritag_start = 0;
18084 int rc = 0;
18085 uint32_t shdr_status, shdr_add_status;
18086 union lpfc_sli4_cfg_shdr *shdr;
18087
18088 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18089 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
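/*
 * The non-embedded payload must still fit in a single SLI4 page, which
 * bounds how many sgl page pairs one mailbox command can post.
 */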
18090 if (reqlen > SLI4_PAGE_SIZE) {
18091 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18092 "2559 Block sgl registration required DMA "
18093 "size (%d) great than a page\n", reqlen);
18094 return -ENOMEM;
18095 }
18096
18097 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18098 if (!mbox)
18099 return -ENOMEM;
18100
18101 /* Allocate DMA memory and set up the non-embedded mailbox command */
18102 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18103 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18104 LPFC_SLI4_MBX_NEMBED);
18105
18106 if (alloclen < reqlen) {
18107 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18108 "0285 Allocated DMA memory size (%d) is "
18109 "less than the requested DMA memory "
18110 "size (%d)\n", alloclen, reqlen);
18111 lpfc_sli4_mbox_cmd_free(phba, mbox);
18112 return -ENOMEM;
18113 }
18114 /* Set up the SGL pages in the non-embedded DMA pages */
18115 viraddr = mbox->sge_array->addr[0];
18116 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18117 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18118
18119 pg_pairs = 0;
18120 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18121 /* Set up the sge entry */
18122 sgl_pg_pairs->sgl_pg0_addr_lo =
18123 cpu_to_le32(putPaddrLow(sglq_entry->phys));
18124 sgl_pg_pairs->sgl_pg0_addr_hi =
18125 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18126 sgl_pg_pairs->sgl_pg1_addr_lo =
18127 cpu_to_le32(putPaddrLow(0));
18128 sgl_pg_pairs->sgl_pg1_addr_hi =
18129 cpu_to_le32(putPaddrHigh(0));
18130
18131 /* Keep the first xritag on the list */
18132 if (pg_pairs == 0)
18133 xritag_start = sglq_entry->sli4_xritag;
18134 sgl_pg_pairs++;
18135 pg_pairs++;
18136 }
18137
18138 /* Complete initialization and perform endian conversion. */
18139 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18140 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18141 sgl->word0 = cpu_to_le32(sgl->word0);
18142
18143 if (!phba->sli4_hba.intr_enable)
18144 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18145 else {
18146 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18147 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18148 }
18149 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18150 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18151 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18152 if (!phba->sli4_hba.intr_enable)
18153 lpfc_sli4_mbox_cmd_free(phba, mbox);
18154 else if (rc != MBX_TIMEOUT)
18155 lpfc_sli4_mbox_cmd_free(phba, mbox);
18156 if (shdr_status || shdr_add_status || rc) {
18157 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18158 "2513 POST_SGL_BLOCK mailbox command failed "
18159 "status x%x add_status x%x mbx status x%x\n",
18160 shdr_status, shdr_add_status, rc);
18161 rc = -ENXIO;
18162 }
18163 return rc;
18164}
18165
18166/**
18167 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
18168 * @phba: pointer to lpfc hba data structure.
18169 * @nblist: pointer to nvme buffer list.
18170 * @count: number of nvme buffers on the list.
18171 *
18172 * This routine is invoked to post a block of @count nvme sgl pages from an
18173 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
18174 * No lock is held.
18175 *
18176 **/
18177static int
18178lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18179 int count)
18180{
18181 struct lpfc_io_buf *lpfc_ncmd;
18182 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18183 struct sgl_page_pairs *sgl_pg_pairs;
18184 void *viraddr;
18185 LPFC_MBOXQ_t *mbox;
18186 uint32_t reqlen, alloclen, pg_pairs;
18187 uint32_t mbox_tmo;
18188 uint16_t xritag_start = 0;
18189 int rc = 0;
18190 uint32_t shdr_status, shdr_add_status;
18191 dma_addr_t pdma_phys_bpl1;
18192 union lpfc_sli4_cfg_shdr *shdr;
18193
18194 /* Calculate the requested length of the dma memory */
18195 reqlen = count * sizeof(struct sgl_page_pairs) +
18196 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18197 if (reqlen > SLI4_PAGE_SIZE) {
18198 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18199 "6118 Block sgl registration required DMA "
18200 "size (%d) great than a page\n", reqlen);
18201 return -ENOMEM;
18202 }
18203 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18204 if (!mbox) {
18205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18206 "6119 Failed to allocate mbox cmd memory\n");
18207 return -ENOMEM;
18208 }
18209
18210 /* Allocate DMA memory and set up the non-embedded mailbox command */
18211 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18212 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18213 reqlen, LPFC_SLI4_MBX_NEMBED);
18214
18215 if (alloclen < reqlen) {
18216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18217 "6120 Allocated DMA memory size (%d) is "
18218 "less than the requested DMA memory "
18219 "size (%d)\n", alloclen, reqlen);
18220 lpfc_sli4_mbox_cmd_free(phba, mbox);
18221 return -ENOMEM;
18222 }
18223
18224 /* Get the first SGE entry from the non-embedded DMA memory */
18225 viraddr = mbox->sge_array->addr[0];
18226
18227 /* Set up the SGL pages in the non-embedded DMA pages */
18228 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18229 sgl_pg_pairs = &sgl->sgl_pg_pairs;
18230
18231 pg_pairs = 0;
18232 list_for_each_entry(lpfc_ncmd, nblist, list) {
18233 /* Set up the sge entry */
18234 sgl_pg_pairs->sgl_pg0_addr_lo =
18235 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18236 sgl_pg_pairs->sgl_pg0_addr_hi =
18237 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
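/* A second SGL page is needed only when the buffer's sgl spills past
 * SGL_PAGE_SIZE; otherwise the pair's second address is left at 0.
 */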
18238 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18239 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18240 SGL_PAGE_SIZE;
18241 else
18242 pdma_phys_bpl1 = 0;
18243 sgl_pg_pairs->sgl_pg1_addr_lo =
18244 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18245 sgl_pg_pairs->sgl_pg1_addr_hi =
18246 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18247 /* Keep the first xritag on the list */
18248 if (pg_pairs == 0)
18249 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18250 sgl_pg_pairs++;
18251 pg_pairs++;
18252 }
18253 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18254 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18255 /* Perform endian conversion if necessary */
18256 sgl->word0 = cpu_to_le32(sgl->word0);
18257
18258 if (!phba->sli4_hba.intr_enable) {
18259 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18260 } else {
18261 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18262 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18263 }
18264 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18265 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18266 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18267 if (!phba->sli4_hba.intr_enable)
18268 lpfc_sli4_mbox_cmd_free(phba, mbox);
18269 else if (rc != MBX_TIMEOUT)
18270 lpfc_sli4_mbox_cmd_free(phba, mbox);
18271 if (shdr_status || shdr_add_status || rc) {
18272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18273 "6125 POST_SGL_BLOCK mailbox command failed "
18274 "status x%x add_status x%x mbx status x%x\n",
18275 shdr_status, shdr_add_status, rc);
18276 rc = -ENXIO;
18277 }
18278 return rc;
18279}
18280
18281/**
18282 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18283 * @phba: pointer to lpfc hba data structure.
18284 * @post_nblist: pointer to the nvme buffer list.
18285 * @sb_count: number of nvme buffers.
18286 *
18287 * This routine walks a list of nvme buffers that was passed in. It attempts
18288 * to construct blocks of nvme buffer sgls which contain contiguous xris and
18289 * uses the non-embedded SGL block post mailbox commands to post to the port.
18290 * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
18291 * embedded SGL post mailbox command for posting. The @post_nblist passed in
18292 * must be a local list, so no lock is needed when manipulating the list.
18293 *
18294 * Returns: the number of successfully posted buffers; 0 indicates failure.
18295 **/
18296int
18297lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18298 struct list_head *post_nblist, int sb_count)
18299{
18300 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18301 int status, sgl_size;
18302 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18303 dma_addr_t pdma_phys_sgl1;
18304 int last_xritag = NO_XRI;
18305 int cur_xritag;
18306 LIST_HEAD(prep_nblist);
18307 LIST_HEAD(blck_nblist);
18308 LIST_HEAD(nvme_nblist);
18309
18310 /* sanity check */
18311 if (sb_count <= 0)
18312 return -EINVAL;
18313
18314 sgl_size = phba->cfg_sg_dma_buf_size;
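/*
 * A block post carries only a starting xritag and a count, so every
 * buffer in a block must have a contiguous xri; a hole in the xri
 * sequence flushes the accumulated block and starts a new one.
 */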
18315 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18316 list_del_init(&lpfc_ncmd->list);
18317 block_cnt++;
18318 if ((last_xritag != NO_XRI) &&
18319 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18320 /* a hole in xri block, form a sgl posting block */
18321 list_splice_init(&prep_nblist, &blck_nblist);
18322 post_cnt = block_cnt - 1;
18323 /* prepare list for next posting block */
18324 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18325 block_cnt = 1;
18326 } else {
18327 /* prepare list for next posting block */
18328 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18329 /* enough sgls for non-embed sgl mbox command */
18330 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18331 list_splice_init(&prep_nblist, &blck_nblist);
18332 post_cnt = block_cnt;
18333 block_cnt = 0;
18334 }
18335 }
18336 num_posting++;
18337 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18338
18339 /* end of repost sgl list condition for NVME buffers */
18340 if (num_posting == sb_count) {
18341 if (post_cnt == 0) {
18342 /* last sgl posting block */
18343 list_splice_init(&prep_nblist, &blck_nblist);
18344 post_cnt = block_cnt;
18345 } else if (block_cnt == 1) {
18346 /* last single sgl with non-contiguous xri */
18347 if (sgl_size > SGL_PAGE_SIZE)
18348 pdma_phys_sgl1 =
18349 lpfc_ncmd->dma_phys_sgl +
18350 SGL_PAGE_SIZE;
18351 else
18352 pdma_phys_sgl1 = 0;
18353 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18354 status = lpfc_sli4_post_sgl(
18355 phba, lpfc_ncmd->dma_phys_sgl,
18356 pdma_phys_sgl1, cur_xritag);
18357 if (status) {
18358 /* Post error. Buffer unavailable. */
18359 lpfc_ncmd->flags |=
18360 LPFC_SBUF_NOT_POSTED;
18361 } else {
18362 /* Post success. Buffer available. */
18363 lpfc_ncmd->flags &=
18364 ~LPFC_SBUF_NOT_POSTED;
18365 lpfc_ncmd->status = IOSTAT_SUCCESS;
18366 num_posted++;
18367 }
18368 /* posted or not, put on the local NVME buffer sgl list */
18369 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18370 }
18371 }
18372
18373 /* continue until a non-embedded page worth of sgls is gathered */
18374 if (post_cnt == 0)
18375 continue;
18376
18377 /* post block of NVME buffer list sgls */
18378 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18379 post_cnt);
18380
18381 /* don't reset xritag due to hole in xri block */
18382 if (block_cnt == 0)
18383 last_xritag = NO_XRI;
18384
18385 /* reset NVME buffer post count for next round of posting */
18386 post_cnt = 0;
18387
		/* move block-posted buffers onto the NVME buffer sgl list */
18389 while (!list_empty(&blck_nblist)) {
18390 list_remove_head(&blck_nblist, lpfc_ncmd,
18391 struct lpfc_io_buf, list);
18392 if (status) {
18393 /* Post error. Mark buffer unavailable. */
18394 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18395 } else {
				/* Post success. Mark buffer available. */
18397 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18398 lpfc_ncmd->status = IOSTAT_SUCCESS;
18399 num_posted++;
18400 }
18401 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18402 }
18403 }
18404 /* Push NVME buffers with sgl posted to the available list */
18405 lpfc_io_buf_replenish(phba, &nvme_nblist);
18406
18407 return num_posted;
18408}
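
/*
 * Illustrative caller sketch (hypothetical, not part of the driver): the
 * routine above expects a local, lock-free list of lpfc_io_buf entries and
 * returns the number actually posted. A repost path might look roughly
 * like:
 *
 *	LIST_HEAD(post_nblist);
 *	int cnt = 0, num_posted;
 *
 *	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, &my_pool, list) {
 *		list_move_tail(&lpfc_ncmd->list, &post_nblist);
 *		cnt++;
 *	}
 *	num_posted = lpfc_sli4_post_io_sgl_list(phba, &post_nblist, cnt);
 *
 * "my_pool" is an assumed private list; both successfully and
 * unsuccessfully posted buffers are replenished to the HBA pools by the
 * routine itself.
 */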
18409
18410/**
18411 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18412 * @phba: pointer to lpfc_hba struct that the frame was received on
18413 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18414 *
18415 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function
 * returns zero if the frame is a valid frame, or a non-zero value when the
 * frame does not pass the check.
18419 **/
18420static int
18421lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18422{
18424 struct fc_vft_header *fc_vft_hdr;
18425 uint32_t *header = (uint32_t *) fc_hdr;
18426
18427#define FC_RCTL_MDS_DIAGS 0xF4
18428
18429 switch (fc_hdr->fh_r_ctl) {
18430 case FC_RCTL_DD_UNCAT: /* uncategorized information */
18431 case FC_RCTL_DD_SOL_DATA: /* solicited data */
18432 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
18433 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
18434 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
18435 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
18436 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
18437 case FC_RCTL_DD_CMD_STATUS: /* command status */
18438 case FC_RCTL_ELS_REQ: /* extended link services request */
18439 case FC_RCTL_ELS_REP: /* extended link services reply */
18440 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
18441 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
18442 case FC_RCTL_BA_ABTS: /* basic link service abort */
18443 case FC_RCTL_BA_RMC: /* remove connection */
18444 case FC_RCTL_BA_ACC: /* basic accept */
18445 case FC_RCTL_BA_RJT: /* basic reject */
18446 case FC_RCTL_BA_PRMT:
18447 case FC_RCTL_ACK_1: /* acknowledge_1 */
18448 case FC_RCTL_ACK_0: /* acknowledge_0 */
18449 case FC_RCTL_P_RJT: /* port reject */
18450 case FC_RCTL_F_RJT: /* fabric reject */
18451 case FC_RCTL_P_BSY: /* port busy */
18452 case FC_RCTL_F_BSY: /* fabric busy to data frame */
18453 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
18454 case FC_RCTL_LCR: /* link credit reset */
18455 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18456 case FC_RCTL_END: /* end */
18457 break;
18458 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
18459 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18460 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18461 return lpfc_fc_frame_check(phba, fc_hdr);
18462 case FC_RCTL_BA_NOP: /* basic link service NOP */
18463 default:
18464 goto drop;
18465 }
18466
18467 switch (fc_hdr->fh_type) {
18468 case FC_TYPE_BLS:
18469 case FC_TYPE_ELS:
18470 case FC_TYPE_FCP:
18471 case FC_TYPE_CT:
18472 case FC_TYPE_NVME:
18473 break;
18474 case FC_TYPE_IP:
18475 case FC_TYPE_ILS:
18476 default:
18477 goto drop;
18478 }
18479
18480 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18481 "2538 Received frame rctl:x%x, type:x%x, "
18482 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18483 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18484 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18485 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18486 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18487 be32_to_cpu(header[6]));
18488 return 0;
18489drop:
18490 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18491 "2539 Dropped frame rctl:x%x type:x%x\n",
18492 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18493 return 1;
18494}
18495
18496/**
18497 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18498 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18499 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. It returns the VFI if a VFT header is present,
 * or 0 otherwise.
18503 **/
18504static uint32_t
18505lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18506{
18507 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18508
18509 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18510 return 0;
18511 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18512}
18513
18514/**
18515 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18516 * @phba: Pointer to the HBA structure to search for the vport on
18517 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18518 * @fcfi: The FC Fabric ID that the frame came from
18519 * @did: Destination ID to match against
18520 *
18521 * This function searches the @phba for a vport that matches the content of the
18522 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18523 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18524 * returns the matching vport pointer or NULL if unable to match frame to a
18525 * vport.
18526 **/
18527static struct lpfc_vport *
18528lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18529 uint16_t fcfi, uint32_t did)
18530{
18531 struct lpfc_vport **vports;
18532 struct lpfc_vport *vport = NULL;
18533 int i;
18534
18535 if (did == Fabric_DID)
18536 return phba->pport;
18537 if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
18538 phba->link_state != LPFC_HBA_READY)
18539 return phba->pport;
18540
18541 vports = lpfc_create_vport_work_array(phba);
18542 if (vports != NULL) {
18543 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18544 if (phba->fcf.fcfi == fcfi &&
18545 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18546 vports[i]->fc_myDID == did) {
18547 vport = vports[i];
18548 break;
18549 }
18550 }
18551 }
18552 lpfc_destroy_vport_work_array(phba, vports);
18553 return vport;
18554}
18555
18556/**
18557 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18558 * @vport: The vport to work on.
18559 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
18565 **/
18566static void
18567lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18568{
18569 struct lpfc_dmabuf *h_buf;
18570 struct hbq_dmabuf *dmabuf = NULL;
18571
18572 /* get the oldest sequence on the rcv list */
18573 h_buf = list_get_first(&vport->rcv_buffer_list,
18574 struct lpfc_dmabuf, list);
18575 if (!h_buf)
18576 return;
18577 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18578 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18579}
18580
18581/**
18582 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18583 * @vport: The vport that the received sequences were sent to.
18584 *
18585 * This function cleans up all outstanding received sequences. This is called
18586 * by the driver when a link event or user action invalidates all the received
18587 * sequences.
18588 **/
18589void
18590lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18591{
18592 struct lpfc_dmabuf *h_buf, *hnext;
18593 struct lpfc_dmabuf *d_buf, *dnext;
18594 struct hbq_dmabuf *dmabuf = NULL;
18595
18596 /* start with the oldest sequence on the rcv list */
18597 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18598 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18599 list_del_init(&dmabuf->hbuf.list);
18600 list_for_each_entry_safe(d_buf, dnext,
18601 &dmabuf->dbuf.list, list) {
18602 list_del_init(&d_buf->list);
18603 lpfc_in_buf_free(vport->phba, d_buf);
18604 }
18605 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18606 }
18607}
18608
18609/**
18610 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18611 * @vport: The vport that the received sequences were sent to.
18612 *
18613 * This function determines whether any received sequences have timed out by
18614 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18615 * indicates that there is at least one timed out sequence this routine will
18616 * go through the received sequences one at a time from most inactive to most
18617 * active to determine which ones need to be cleaned up. Once it has determined
18618 * that a sequence needs to be cleaned up it will simply free up the resources
18619 * without sending an abort.
18620 **/
18621void
18622lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18623{
18624 struct lpfc_dmabuf *h_buf, *hnext;
18625 struct lpfc_dmabuf *d_buf, *dnext;
18626 struct hbq_dmabuf *dmabuf = NULL;
18627 unsigned long timeout;
18628 int abort_count = 0;
18629
18630 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18631 vport->rcv_buffer_time_stamp);
18632 if (list_empty(&vport->rcv_buffer_list) ||
18633 time_before(jiffies, timeout))
18634 return;
18635 /* start with the oldest sequence on the rcv list */
18636 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18637 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18638 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18639 dmabuf->time_stamp);
18640 if (time_before(jiffies, timeout))
18641 break;
18642 abort_count++;
18643 list_del_init(&dmabuf->hbuf.list);
18644 list_for_each_entry_safe(d_buf, dnext,
18645 &dmabuf->dbuf.list, list) {
18646 list_del_init(&d_buf->list);
18647 lpfc_in_buf_free(vport->phba, d_buf);
18648 }
18649 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18650 }
18651 if (abort_count)
18652 lpfc_update_rcv_time_stamp(vport);
18653}
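
/*
 * The E_D_TOV aging above uses the standard jiffies idiom: a sequence is
 * stale once "now" reaches its time stamp plus the timeout. A minimal
 * sketch of the same check (illustrative only):
 *
 *	unsigned long deadline = dmabuf->time_stamp +
 *				 msecs_to_jiffies(vport->phba->fc_edtov);
 *
 *	if (time_before(jiffies, deadline))
 *		return;		// sequence is still young, keep it
 *	// otherwise free the sequence's header and data buffers
 */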
18654
18655/**
18656 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
18658 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18659 *
18660 * This function searches through the existing incomplete sequences that have
18661 * been sent to this @vport. If the frame matches one of the incomplete
18662 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in
 * the sequence list that the frame was linked to.
18667 **/
18668static struct hbq_dmabuf *
18669lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18670{
18671 struct fc_frame_header *new_hdr;
18672 struct fc_frame_header *temp_hdr;
18673 struct lpfc_dmabuf *d_buf;
18674 struct lpfc_dmabuf *h_buf;
18675 struct hbq_dmabuf *seq_dmabuf = NULL;
18676 struct hbq_dmabuf *temp_dmabuf = NULL;
18677 uint8_t found = 0;
18678
18679 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18680 dmabuf->time_stamp = jiffies;
18681 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18682
18683 /* Use the hdr_buf to find the sequence that this frame belongs to */
18684 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18685 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18686 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18687 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18688 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18689 continue;
18690 /* found a pending sequence that matches this frame */
18691 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18692 break;
18693 }
18694 if (!seq_dmabuf) {
18695 /*
18696 * This indicates first frame received for this sequence.
18697 * Queue the buffer on the vport's rcv_buffer_list.
18698 */
18699 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18700 lpfc_update_rcv_time_stamp(vport);
18701 return dmabuf;
18702 }
18703 temp_hdr = seq_dmabuf->hbuf.virt;
18704 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18705 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18706 list_del_init(&seq_dmabuf->hbuf.list);
18707 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18708 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18709 lpfc_update_rcv_time_stamp(vport);
18710 return dmabuf;
18711 }
18712 /* move this sequence to the tail to indicate a young sequence */
18713 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18714 seq_dmabuf->time_stamp = jiffies;
18715 lpfc_update_rcv_time_stamp(vport);
18716 if (list_empty(&seq_dmabuf->dbuf.list)) {
18717 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18718 return seq_dmabuf;
18719 }
18720 /* find the correct place in the sequence to insert this frame */
18721 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18722 while (!found) {
18723 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18724 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18725 /*
18726 * If the frame's sequence count is greater than the frame on
18727 * the list then insert the frame right after this frame
18728 */
18729 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18730 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18731 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18732 found = 1;
18733 break;
18734 }
18735
18736 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18737 break;
18738 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18739 }
18740
18741 if (found)
18742 return seq_dmabuf;
18743 return NULL;
18744}
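
/*
 * Frames are matched to a pending sequence above by the (SEQ_ID, OX_ID,
 * S_ID) tuple from the FC header. A hypothetical helper expressing the
 * same comparison (the driver open-codes it):
 *
 *	static bool lpfc_same_sequence(struct fc_frame_header *a,
 *				       struct fc_frame_header *b)
 *	{
 *		return a->fh_seq_id == b->fh_seq_id &&
 *		       a->fh_ox_id == b->fh_ox_id &&
 *		       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *	}
 */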
18745
18746/**
18747 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
18749 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18750 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it shall free
 * up all the frames from the partially assembled sequence.
18755 *
18756 * Return
 * true -- if there is a matching partially assembled sequence present and
 * all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 * nothing gets aborted in the lower layer driver
18761 **/
18762static bool
18763lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18764 struct hbq_dmabuf *dmabuf)
18765{
18766 struct fc_frame_header *new_hdr;
18767 struct fc_frame_header *temp_hdr;
18768 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18769 struct hbq_dmabuf *seq_dmabuf = NULL;
18770
18771 /* Use the hdr_buf to find the sequence that matches this frame */
18772 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18773 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18774 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18775 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18776 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18777 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18778 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18779 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18780 continue;
18781 /* found a pending sequence that matches this frame */
18782 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18783 break;
18784 }
18785
18786 /* Free up all the frames from the partially assembled sequence */
18787 if (seq_dmabuf) {
18788 list_for_each_entry_safe(d_buf, n_buf,
18789 &seq_dmabuf->dbuf.list, list) {
18790 list_del_init(&d_buf->list);
18791 lpfc_in_buf_free(vport->phba, d_buf);
18792 }
18793 return true;
18794 }
18795 return false;
18796}
18797
18798/**
18799 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
18801 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18802 *
 * This function tries to abort the sequence, already assembled and passed
 * to the upper level protocol, described by the information from the basic
 * abort @dmabuf. It checks whether such a pending context exists at the
 * upper level protocol. If so, it shall clean up the pending context.
18807 *
18808 * Return
 * true -- if a matching pending context of the sequence was cleaned up
 * at the ulp;
 * false -- if no matching pending context of the sequence is present
 * at the ulp.
18813 **/
18814static bool
18815lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18816{
18817 struct lpfc_hba *phba = vport->phba;
18818 int handled;
18819
18820 /* Accepting abort at ulp with SLI4 only */
18821 if (phba->sli_rev < LPFC_SLI_REV4)
18822 return false;
18823
	/* Let all interested upper level protocols handle the unsol abort */
18825 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18826 if (handled)
18827 return true;
18828
18829 return false;
18830}
18831
18832/**
18833 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18834 * @phba: Pointer to HBA context object.
18835 * @cmd_iocbq: pointer to the command iocbq structure.
18836 * @rsp_iocbq: pointer to the response iocbq structure.
18837 *
18838 * This function handles the sequence abort response iocb command complete
18839 * event. It properly releases the memory allocated to the sequence abort
18840 * accept iocb.
18841 **/
18842static void
18843lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18844 struct lpfc_iocbq *cmd_iocbq,
18845 struct lpfc_iocbq *rsp_iocbq)
18846{
18847 if (cmd_iocbq) {
18848 lpfc_nlp_put(cmd_iocbq->ndlp);
18849 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18850 }
18851
	/* Failure means BLS ABORT RSP did not get delivered to remote node */
18853 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18855 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18856 get_job_ulpstatus(phba, rsp_iocbq),
18857 get_job_word4(phba, rsp_iocbq));
18858}
18859
18860/**
18861 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18862 * @phba: Pointer to HBA context object.
18863 * @xri: xri id in transaction.
18864 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
18867 **/
18868uint16_t
18869lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18870 uint16_t xri)
18871{
18872 uint16_t i;
18873
18874 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18875 if (xri == phba->sli4_hba.xri_ids[i])
18876 return i;
18877 }
18878 return NO_XRI;
18879}
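
/*
 * Usage sketch (hypothetical): the logical index returned above feeds RRQ
 * bookkeeping on the abort path, e.g.:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, remote_xri, 0);
 *
 * "remote_xri" stands in for the peer's exchange id. Note the lookup is a
 * linear scan of xri_ids, so it is only suitable for slow paths such as
 * abort handling.
 */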
18880
18881/**
18882 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18883 * @vport: pointer to a virtual port.
18884 * @fc_hdr: pointer to a FC frame header.
18885 * @aborted: was the partially assembled receive sequence successfully aborted
18886 *
18887 * This function sends a basic response to a previous unsol sequence abort
18888 * event after aborting the sequence handling.
18889 **/
18890void
18891lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18892 struct fc_frame_header *fc_hdr, bool aborted)
18893{
18894 struct lpfc_hba *phba = vport->phba;
18895 struct lpfc_iocbq *ctiocb = NULL;
18896 struct lpfc_nodelist *ndlp;
18897 uint16_t oxid, rxid, xri, lxri;
18898 uint32_t sid, fctl;
18899 union lpfc_wqe128 *icmd;
18900 int rc;
18901
18902 if (!lpfc_is_link_up(phba))
18903 return;
18904
18905 sid = sli4_sid_from_fc_hdr(fc_hdr);
18906 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18907 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18908
18909 ndlp = lpfc_findnode_did(vport, sid);
18910 if (!ndlp) {
18911 ndlp = lpfc_nlp_init(vport, sid);
18912 if (!ndlp) {
18913 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18914 "1268 Failed to allocate ndlp for "
18915 "oxid:x%x SID:x%x\n", oxid, sid);
18916 return;
18917 }
18918 /* Put ndlp onto vport node list */
18919 lpfc_enqueue_node(vport, ndlp);
18920 }
18921
18922 /* Allocate buffer for rsp iocb */
18923 ctiocb = lpfc_sli_get_iocbq(phba);
18924 if (!ctiocb)
18925 return;
18926
18927 icmd = &ctiocb->wqe;
18928
18929 /* Extract the F_CTL field from FC_HDR */
18930 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18931
18932 ctiocb->ndlp = lpfc_nlp_get(ndlp);
18933 if (!ctiocb->ndlp) {
18934 lpfc_sli_release_iocbq(phba, ctiocb);
18935 return;
18936 }
18937
18938 ctiocb->vport = vport;
18939 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18940 ctiocb->sli4_lxritag = NO_XRI;
18941 ctiocb->sli4_xritag = NO_XRI;
18942 ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18943
18944 if (fctl & FC_FC_EX_CTX)
18945 /* Exchange responder sent the abort so we
18946 * own the oxid.
18947 */
18948 xri = oxid;
18949 else
18950 xri = rxid;
18951 lxri = lpfc_sli4_xri_inrange(phba, xri);
18952 if (lxri != NO_XRI)
18953 lpfc_set_rrq_active(phba, ndlp, lxri,
18954 (xri == oxid) ? rxid : oxid, 0);
18955 /* For BA_ABTS from exchange responder, if the logical xri with
18956 * the oxid maps to the FCP XRI range, the port no longer has
18957 * that exchange context, send a BLS_RJT. Override the IOCB for
18958 * a BA_RJT.
18959 */
18960 if ((fctl & FC_FC_EX_CTX) &&
18961 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18962 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18963 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18964 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18965 FC_BA_RJT_INV_XID);
18966 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18967 FC_BA_RJT_UNABLE);
18968 }
18969
18970 /* If BA_ABTS failed to abort a partially assembled receive sequence,
18971 * the driver no longer has that exchange, send a BLS_RJT. Override
18972 * the IOCB for a BA_RJT.
18973 */
	if (!aborted) {
18975 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18976 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18977 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18978 FC_BA_RJT_INV_XID);
18979 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18980 FC_BA_RJT_UNABLE);
18981 }
18982
18983 if (fctl & FC_FC_EX_CTX) {
18984 /* ABTS sent by responder to CT exchange, construction
18985 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18986 * field and RX_ID from ABTS for RX_ID field.
18987 */
18988 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18989 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18990 } else {
18991 /* ABTS sent by initiator to CT exchange, construction
18992 * of BA_ACC will need to allocate a new XRI as for the
18993 * XRI_TAG field.
18994 */
18995 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18996 }
18997
	/* OX_ID is invariant regardless of who sent the ABTS */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
19001
19002 /* Use CT=VPI */
19003 bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
19004 ndlp->nlp_DID);
19005 bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
19006 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
19007 bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
19008
19009 /* Xmit CT abts response on exchange <xid> */
19010 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
19011 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
19012 ctiocb->abort_rctl, oxid, phba->link_state);
19013
19014 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
19015 if (rc == IOCB_ERROR) {
19016 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19017 "2925 Failed to issue CT ABTS RSP x%x on "
19018 "xri x%x, Data x%x\n",
19019 ctiocb->abort_rctl, oxid,
19020 phba->link_state);
19021 lpfc_nlp_put(ndlp);
19022 ctiocb->ndlp = NULL;
19023 lpfc_sli_release_iocbq(phba, ctiocb);
19024 }
19025
19026 /* if only usage of this nodelist is BLS response, release initial ref
19027 * to free ndlp when transmit completes
19028 */
19029 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
19030 !(ndlp->nlp_flag & NLP_DROPPED) &&
19031 !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
19032 ndlp->nlp_flag |= NLP_DROPPED;
19033 lpfc_nlp_put(ndlp);
19034 }
19035}
19036
19037/**
19038 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
19039 * @vport: Pointer to the vport on which this sequence was received
19040 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19041 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
 * indicate that the unsolicited sequence has been aborted. After that, it
 * issues a basic accept to accept the abort.
19049 **/
19050static void
19051lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
19052 struct hbq_dmabuf *dmabuf)
19053{
19054 struct lpfc_hba *phba = vport->phba;
19055 struct fc_frame_header fc_hdr;
19056 uint32_t fctl;
19057 bool aborted;
19058
	/* Make a copy of fc_hdr before the dmabuf is released */
19060 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
19061 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
19062
19063 if (fctl & FC_FC_EX_CTX) {
19064 /* ABTS by responder to exchange, no cleanup needed */
19065 aborted = true;
19066 } else {
19067 /* ABTS by initiator to exchange, need to do cleanup */
19068 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (!aborted)
19070 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
19071 }
19072 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19073
19074 if (phba->nvmet_support) {
19075 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
19076 return;
19077 }
19078
19079 /* Respond with BA_ACC or BA_RJT accordingly */
19080 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19081}
19082
19083/**
19084 * lpfc_seq_complete - Indicates if a sequence is complete
19085 * @dmabuf: pointer to a dmabuf that describes the FC sequence
19086 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) That there is a frame with the
 * "last frame of sequence" bit set. 3) That there are no holes in the
 * sequence count. The function will return 1 when the sequence is complete,
 * otherwise it will return 0.
19094 **/
19095static int
19096lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19097{
19098 struct fc_frame_header *hdr;
19099 struct lpfc_dmabuf *d_buf;
19100 struct hbq_dmabuf *seq_dmabuf;
19101 uint32_t fctl;
19102 int seq_count = 0;
19103
19104 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure the first frame of the sequence has a sequence count of zero */
19106 if (hdr->fh_seq_cnt != seq_count)
19107 return 0;
19108 fctl = (hdr->fh_f_ctl[0] << 16 |
19109 hdr->fh_f_ctl[1] << 8 |
19110 hdr->fh_f_ctl[2]);
19111 /* If last frame of sequence we can return success. */
19112 if (fctl & FC_FC_END_SEQ)
19113 return 1;
19114 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19115 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19116 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19117 /* If there is a hole in the sequence count then fail. */
19118 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19119 return 0;
19120 fctl = (hdr->fh_f_ctl[0] << 16 |
19121 hdr->fh_f_ctl[1] << 8 |
19122 hdr->fh_f_ctl[2]);
19123 /* If last frame of sequence we can return success. */
19124 if (fctl & FC_FC_END_SEQ)
19125 return 1;
19126 }
19127 return 0;
19128}
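
/*
 * The 24-bit F_CTL field is carried as three bytes in the FC header. The
 * composition used above could be written as a small helper (hypothetical,
 * for illustration only):
 *
 *	static u32 lpfc_fc_fctl(struct fc_frame_header *hdr)
 *	{
 *		return hdr->fh_f_ctl[0] << 16 |
 *		       hdr->fh_f_ctl[1] << 8 |
 *		       hdr->fh_f_ctl[2];
 *	}
 *
 * FC_FC_END_SEQ in the result marks the last frame of the sequence.
 */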
19129
19130/**
19131 * lpfc_prep_seq - Prep sequence for ULP processing
19132 * @vport: Pointer to the vport on which this sequence was received
19133 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19134 *
19135 * This function takes a sequence, described by a list of frames, and creates
19136 * a list of iocbq structures to describe the sequence. This iocbq list will be
19137 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that could not
 * be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
19142 **/
19143static struct lpfc_iocbq *
19144lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19145{
19146 struct hbq_dmabuf *hbq_buf;
19147 struct lpfc_dmabuf *d_buf, *n_buf;
19148 struct lpfc_iocbq *first_iocbq, *iocbq;
19149 struct fc_frame_header *fc_hdr;
19150 uint32_t sid;
19151 uint32_t len, tot_len;
19152
19153 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19154 /* remove from receive buffer list */
19155 list_del_init(&seq_dmabuf->hbuf.list);
19156 lpfc_update_rcv_time_stamp(vport);
19157 /* get the Remote Port's SID */
19158 sid = sli4_sid_from_fc_hdr(fc_hdr);
19159 tot_len = 0;
19160 /* Get an iocbq struct to fill in. */
19161 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19162 if (first_iocbq) {
19163 /* Initialize the first IOCB. */
19164 first_iocbq->wcqe_cmpl.total_data_placed = 0;
19165 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
19166 IOSTAT_SUCCESS);
19167 first_iocbq->vport = vport;
19168
19169 /* Check FC Header to see what TYPE of frame we are rcv'ing */
19170 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19171 bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
19172 sli4_did_from_fc_hdr(fc_hdr));
19173 }
19174
19175 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19176 NO_XRI);
19177 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19178 be16_to_cpu(fc_hdr->fh_ox_id));
19179
19180 /* put the first buffer into the first iocb */
19181 tot_len = bf_get(lpfc_rcqe_length,
19182 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19183
19184 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
19185 first_iocbq->bpl_dmabuf = NULL;
19186 /* Keep track of the BDE count */
19187 first_iocbq->wcqe_cmpl.word3 = 1;
19188
19189 if (tot_len > LPFC_DATA_BUF_SIZE)
19190 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
19191 LPFC_DATA_BUF_SIZE;
19192 else
19193 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
19194
19195 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
19196 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
19197 sid);
19198 }
19199 iocbq = first_iocbq;
19200 /*
19201 * Each IOCBq can have two Buffers assigned, so go through the list
19202 * of buffers for this sequence and save two buffers in each IOCBq
19203 */
19204 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19205 if (!iocbq) {
19206 lpfc_in_buf_free(vport->phba, d_buf);
19207 continue;
19208 }
19209 if (!iocbq->bpl_dmabuf) {
19210 iocbq->bpl_dmabuf = d_buf;
19211 iocbq->wcqe_cmpl.word3++;
19212 /* We need to get the size out of the right CQE */
19213 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19214 len = bf_get(lpfc_rcqe_length,
19215 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19216 iocbq->unsol_rcv_len = len;
19217 iocbq->wcqe_cmpl.total_data_placed += len;
19218 tot_len += len;
19219 } else {
19220 iocbq = lpfc_sli_get_iocbq(vport->phba);
19221 if (!iocbq) {
19222 if (first_iocbq) {
19223 bf_set(lpfc_wcqe_c_status,
19224 &first_iocbq->wcqe_cmpl,
19225 IOSTAT_SUCCESS);
19226 first_iocbq->wcqe_cmpl.parameter =
19227 IOERR_NO_RESOURCES;
19228 }
19229 lpfc_in_buf_free(vport->phba, d_buf);
19230 continue;
19231 }
19232 /* We need to get the size out of the right CQE */
19233 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19234 len = bf_get(lpfc_rcqe_length,
19235 &hbq_buf->cq_event.cqe.rcqe_cmpl);
19236 iocbq->cmd_dmabuf = d_buf;
19237 iocbq->bpl_dmabuf = NULL;
19238 iocbq->wcqe_cmpl.word3 = 1;
19239
19240 if (len > LPFC_DATA_BUF_SIZE)
19241 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19242 LPFC_DATA_BUF_SIZE;
19243 else
19244 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19245 len;
19246
19247 tot_len += len;
19248 iocbq->wcqe_cmpl.total_data_placed = tot_len;
19249 bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
19250 sid);
19251 list_add_tail(&iocbq->list, &first_iocbq->list);
19252 }
19253 }
19254 /* Free the sequence's header buffer */
19255 if (!first_iocbq)
19256 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19257
19258 return first_iocbq;
19259}
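
/*
 * Packing rule used above, in brief: each iocbq carries at most two DMA
 * buffers (cmd_dmabuf and bpl_dmabuf), so an N-frame sequence consumes
 * roughly N/2 iocbqs chained off first_iocbq->list (illustrative shape):
 *
 *	first_iocbq [frame0, frame1] -> iocbq [frame2, frame3] -> ...
 *
 * wcqe_cmpl.word3 tracks the per-iocbq BDE count while total_data_placed
 * accumulates the running sequence length.
 */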
19260
19261static void
19262lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19263 struct hbq_dmabuf *seq_dmabuf)
19264{
19265 struct fc_frame_header *fc_hdr;
19266 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19267 struct lpfc_hba *phba = vport->phba;
19268
19269 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19270 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19271 if (!iocbq) {
19272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19273 "2707 Ring %d handler: Failed to allocate "
19274 "iocb Rctl x%x Type x%x received\n",
19275 LPFC_ELS_RING,
19276 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19277 return;
19278 }
19279 if (!lpfc_complete_unsol_iocb(phba,
19280 phba->sli4_hba.els_wq->pring,
19281 iocbq, fc_hdr->fh_r_ctl,
19282 fc_hdr->fh_type)) {
19283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19284 "2540 Ring %d handler: unexpected Rctl "
19285 "x%x Type x%x received\n",
19286 LPFC_ELS_RING,
19287 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19288 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19289 }
19290
19291 /* Free iocb created in lpfc_prep_seq */
19292 list_for_each_entry_safe(curr_iocb, next_iocb,
19293 &iocbq->list, list) {
19294 list_del_init(&curr_iocb->list);
19295 lpfc_sli_release_iocbq(phba, curr_iocb);
19296 }
19297 lpfc_sli_release_iocbq(phba, iocbq);
19298}
19299
19300static void
19301lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19302 struct lpfc_iocbq *rspiocb)
19303{
19304 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19305
19306 if (pcmd && pcmd->virt)
19307 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19308 kfree(pcmd);
19309 lpfc_sli_release_iocbq(phba, cmdiocb);
19310 lpfc_drain_txq(phba);
19311}
19312
19313static void
19314lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19315 struct hbq_dmabuf *dmabuf)
19316{
19317 struct fc_frame_header *fc_hdr;
19318 struct lpfc_hba *phba = vport->phba;
19319 struct lpfc_iocbq *iocbq = NULL;
19320 union lpfc_wqe128 *pwqe;
19321 struct lpfc_dmabuf *pcmd = NULL;
19322 uint32_t frame_len;
19323 int rc;
19324 unsigned long iflags;
19325
19326 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19327 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19328
19329 /* Send the received frame back */
19330 iocbq = lpfc_sli_get_iocbq(phba);
19331 if (!iocbq) {
19332 /* Queue cq event and wakeup worker thread to process it */
19333 spin_lock_irqsave(&phba->hbalock, iflags);
19334 list_add_tail(&dmabuf->cq_event.list,
19335 &phba->sli4_hba.sp_queue_event);
19336 spin_unlock_irqrestore(&phba->hbalock, iflags);
19337 set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
19338 lpfc_worker_wake_up(phba);
19339 return;
19340 }
19341
19342 /* Allocate buffer for command payload */
19343 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19344 if (pcmd)
19345 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19346 &pcmd->phys);
19347 if (!pcmd || !pcmd->virt)
19348 goto exit;
19349
19350 INIT_LIST_HEAD(&pcmd->list);
19351
	/* copy in the payload */
19353 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19354
19355 iocbq->cmd_dmabuf = pcmd;
19356 iocbq->vport = vport;
19357 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19358 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19359 iocbq->num_bdes = 0;
19360
19361 pwqe = &iocbq->wqe;
19362 /* fill in BDE's for command */
19363 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19364 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19365 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19366 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19367
19368 pwqe->send_frame.frame_len = frame_len;
19369 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19370 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19371 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19372 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19373 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19374 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19375
19376 pwqe->generic.wqe_com.word7 = 0;
19377 pwqe->generic.wqe_com.word10 = 0;
19378
19379 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19380 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19381 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19382 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19383 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19384 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19385 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19386 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19387 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19388 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19389 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19390 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19391 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19392
19393 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19394
19395 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19396 if (rc == IOCB_ERROR)
19397 goto exit;
19398
19399 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19400 return;
19401
19402exit:
19403 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19404 "2023 Unable to process MDS loopback frame\n");
19405 if (pcmd && pcmd->virt)
19406 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19407 kfree(pcmd);
19408 if (iocbq)
19409 lpfc_sli_release_iocbq(phba, iocbq);
19410 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19411}
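
/*
 * The six FC header words are copied into the SEND_FRAME WQE in CPU order.
 * A compact equivalent of the per-word assignments above (hypothetical
 * sketch; wd[0..5] map to fc_hdr_wd0..fc_hdr_wd5):
 *
 *	__be32 *hdr = (__be32 *)fc_hdr;
 *	u32 wd[6];
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		wd[i] = be32_to_cpu(hdr[i]);
 */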
19412
19413/**
19414 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19415 * @phba: Pointer to HBA context object.
19416 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19417 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives them to the upper layers when a received
 * buffer indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers in interrupt context. The
 * worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is
 * received.
19424 **/
19425void
19426lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19427 struct hbq_dmabuf *dmabuf)
19428{
19429 struct hbq_dmabuf *seq_dmabuf;
19430 struct fc_frame_header *fc_hdr;
19431 struct lpfc_vport *vport;
19432 uint32_t fcfi;
19433 uint32_t did;
19434
19435 /* Process each received buffer */
19436 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19437
19438 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19439 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19440 vport = phba->pport;
19441 /* Handle MDS Loopback frames */
19442 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
19443 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19444 else
19445 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19446 return;
19447 }
19448
	/* check to see if this is a valid type of frame */
19450 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19451 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19452 return;
19453 }
19454
19455 if ((bf_get(lpfc_cqe_code,
19456 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19457 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19458 &dmabuf->cq_event.cqe.rcqe_cmpl);
19459 else
19460 fcfi = bf_get(lpfc_rcqe_fcf_id,
19461 &dmabuf->cq_event.cqe.rcqe_cmpl);
19462
19463 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19464 vport = phba->pport;
19465 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19466 "2023 MDS Loopback %d bytes\n",
19467 bf_get(lpfc_rcqe_length,
19468 &dmabuf->cq_event.cqe.rcqe_cmpl));
19469 /* Handle MDS Loopback frames */
19470 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19471 return;
19472 }
19473
19474 /* d_id this frame is directed to */
19475 did = sli4_did_from_fc_hdr(fc_hdr);
19476
19477 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19478 if (!vport) {
19479 /* throw out the frame */
19480 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19481 return;
19482 }
19483
19484 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19485 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19486 (did != Fabric_DID)) {
19487 /*
19488 * Throw out the frame if we are not pt2pt.
19489 * The pt2pt protocol allows for discovery frames
19490 * to be received without a registered VPI.
19491 */
19492 if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
19493 phba->link_state == LPFC_HBA_READY) {
19494 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19495 return;
19496 }
19497 }
19498
19499 /* Handle the basic abort sequence (BA_ABTS) event */
19500 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19501 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19502 return;
19503 }
19504
19505 /* Link this frame */
19506 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19507 if (!seq_dmabuf) {
19508 /* unable to add frame to vport - throw it out */
19509 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19510 return;
19511 }
19512 /* If not last frame in sequence continue processing frames. */
19513 if (!lpfc_seq_complete(seq_dmabuf))
19514 return;
19515
19516 /* Send the complete sequence to the upper layer protocol */
19517 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19518}
19519
19520/**
19521 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19522 * @phba: pointer to lpfc hba data structure.
19523 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery, when driver execution is
 * sequential.
19532 *
19533 * Return codes
19534 * 0 - successful
19535 * -EIO - The mailbox failed to complete successfully.
19536 * When this error occurs, the driver is not guaranteed
19537 * to have any rpi regions posted to the device and
19538 * must either attempt to repost the regions or take a
19539 * fatal error.
19540 **/
19541int
19542lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19543{
19544 struct lpfc_rpi_hdr *rpi_page;
19545 uint32_t rc = 0;
19546 uint16_t lrpi = 0;
19547
19548 /* SLI4 ports that support extents do not require RPI headers. */
19549 if (!phba->sli4_hba.rpi_hdrs_in_use)
19550 goto exit;
19551 if (phba->sli4_hba.extents_in_use)
19552 return -EIO;
19553
19554 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19555 /*
19556 * Assign the rpi headers a physical rpi only if the driver
19557 * has not initialized those resources. A port reset only
19558 * needs the headers posted.
19559 */
19560 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19561 LPFC_RPI_RSRC_RDY)
19562 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19563
19564 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19565 if (rc != MBX_SUCCESS) {
19566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19567 "2008 Error %d posting all rpi "
19568 "headers\n", rc);
19569 rc = -EIO;
19570 break;
19571 }
19572 }
19573
19574 exit:
19575 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19576 LPFC_RPI_RSRC_RDY);
19577 return rc;
19578}
19579
19580/**
19581 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19582 * @phba: pointer to lpfc hba data structure.
19583 * @rpi_page: pointer to the rpi memory region.
19584 *
19585 * This routine is invoked to post a single rpi header to the
19586 * HBA consistent with the SLI-4 interface spec. This memory region
19587 * maps up to 64 rpi context regions.
19588 *
19589 * Return codes
19590 * 0 - successful
19591 * -ENOMEM - No available memory
19592 * -EIO - The mailbox failed to complete successfully.
19593 **/
19594int
19595lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19596{
19597 LPFC_MBOXQ_t *mboxq;
19598 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19599 uint32_t rc = 0;
19600 uint32_t shdr_status, shdr_add_status;
19601 union lpfc_sli4_cfg_shdr *shdr;
19602
19603 /* SLI4 ports that support extents do not require RPI headers. */
19604 if (!phba->sli4_hba.rpi_hdrs_in_use)
19605 return rc;
19606 if (phba->sli4_hba.extents_in_use)
19607 return -EIO;
19608
19609 /* The port is notified of the header region via a mailbox command. */
19610 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19611 if (!mboxq) {
19612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19613 "2001 Unable to allocate memory for issuing "
19614 "SLI_CONFIG_SPECIAL mailbox command\n");
19615 return -ENOMEM;
19616 }
19617
	/* Post the rpi header region to the port. */
19619 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19620 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19621 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19622 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19623 sizeof(struct lpfc_sli4_cfg_mhdr),
19624 LPFC_SLI4_MBX_EMBED);
19625
19626
19627 /* Post the physical rpi to the port for this rpi header. */
19628 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19629 rpi_page->start_rpi);
19630 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19631 hdr_tmpl, rpi_page->page_count);
19632
19633 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19634 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19635 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19636 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19637 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19638 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19639 mempool_free(mboxq, phba->mbox_mem_pool);
19640 if (shdr_status || shdr_add_status || rc) {
19641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19642 "2514 POST_RPI_HDR mailbox failed with "
19643 "status x%x add_status x%x, mbx status x%x\n",
19644 shdr_status, shdr_add_status, rc);
19645 rc = -ENXIO;
19646 } else {
		/*
		 * The next_rpi stores the next logical modulo-64 rpi value used
		 * to post physical rpis in subsequent rpi postings.
		 */
19651 spin_lock_irq(&phba->hbalock);
19652 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19653 spin_unlock_irq(&phba->hbalock);
19654 }
19655 return rc;
19656}
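
/*
 * Status-checking pattern used above for SLI_CONFIG mailboxes
 * (illustrative): the IOCTL completion status lives in the cfg subheader,
 * separate from the mailbox return code, and both must be checked:
 *
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		// treat the posting as failed
 */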
19657
19658/**
19659 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19660 * @phba: pointer to lpfc hba data structure.
19661 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask. If the pool of remaining rpis runs low, it also
 * attempts to post an additional rpi header page to the port.
19666 *
19667 * Returns
19668 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19669 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
19670 **/
19671int
19672lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19673{
19674 unsigned long rpi;
19675 uint16_t max_rpi, rpi_limit;
19676 uint16_t rpi_remaining, lrpi = 0;
19677 struct lpfc_rpi_hdr *rpi_hdr;
19678 unsigned long iflag;
19679
19680 /*
19681 * Fetch the next logical rpi. Because this index is logical,
19682 * the driver starts at 0 each time.
19683 */
19684 spin_lock_irqsave(&phba->hbalock, iflag);
19685 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19686 rpi_limit = phba->sli4_hba.next_rpi;
19687
19688 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
	if (rpi >= rpi_limit) {
		rpi = LPFC_RPI_ALLOC_ERROR;
	} else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
19696 lpfc_printf_log(phba, KERN_INFO,
19697 LOG_NODE | LOG_DISCOVERY,
19698 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19699 (int) rpi, max_rpi, rpi_limit);
19700
19701 /*
19702 * Don't try to allocate more rpi header regions if the device limit
19703 * has been exhausted.
19704 */
19705 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19706 (phba->sli4_hba.rpi_count >= max_rpi)) {
19707 spin_unlock_irqrestore(&phba->hbalock, iflag);
19708 return rpi;
19709 }
19710
19711 /*
19712 * RPI header postings are not required for SLI4 ports capable of
19713 * extents.
19714 */
19715 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19716 spin_unlock_irqrestore(&phba->hbalock, iflag);
19717 return rpi;
19718 }
19719
19720 /*
19721 * If the driver is running low on rpi resources, allocate another
19722 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * the maximum supported by the device.
19725 */
19726 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19727 spin_unlock_irqrestore(&phba->hbalock, iflag);
19728 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19729 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19730 if (!rpi_hdr) {
19731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19732 "2002 Error Could not grow rpi "
19733 "count\n");
19734 } else {
19735 lrpi = rpi_hdr->start_rpi;
19736 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19737 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19738 }
19739 }
19740
19741 return rpi;
19742}
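
/*
 * The allocator above is the usual Linux bitmap idiom; a minimal sketch of
 * the allocate/free pair (hypothetical, ignoring header-page growth):
 *
 *	rpi = find_first_zero_bit(bmask, limit);
 *	if (rpi < limit)
 *		set_bit(rpi, bmask);			// allocate
 *	...
 *	if (test_and_clear_bit(rpi, bmask))		// free
 *		count--;
 *
 * Both halves must run under hbalock since the bitmap is shared.
 */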
19743
19744/**
19745 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19746 * @phba: pointer to lpfc hba data structure.
19747 * @rpi: rpi to free
19748 *
19749 * This routine is invoked to release an rpi to the pool of
19750 * available rpis maintained by the driver.
19751 **/
19752static void
19753__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19754{
19755 /*
19756 * if the rpi value indicates a prior unreg has already
19757 * been done, skip the unreg.
19758 */
19759 if (rpi == LPFC_RPI_ALLOC_ERROR)
19760 return;
19761
19762 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19763 phba->sli4_hba.rpi_count--;
19764 phba->sli4_hba.max_cfg_param.rpi_used--;
19765 } else {
19766 lpfc_printf_log(phba, KERN_INFO,
19767 LOG_NODE | LOG_DISCOVERY,
19768 "2016 rpi %x not inuse\n",
19769 rpi);
19770 }
19771}
19772
19773/**
19774 * lpfc_sli4_free_rpi - Release an rpi for reuse.
19775 * @phba: pointer to lpfc hba data structure.
19776 * @rpi: rpi to free
19777 *
19778 * This routine is invoked to release an rpi to the pool of
19779 * available rpis maintained by the driver.
19780 **/
19781void
19782lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19783{
19784 spin_lock_irq(&phba->hbalock);
19785 __lpfc_sli4_free_rpi(phba, rpi);
19786 spin_unlock_irq(&phba->hbalock);
19787}
19788
19789/**
19790 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19791 * @phba: pointer to lpfc hba data structure.
19792 *
 * This routine is invoked to free the bitmask memory regions that
 * provided rpis.
19795 **/
19796void
19797lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19798{
19799 kfree(phba->sli4_hba.rpi_bmask);
19800 kfree(phba->sli4_hba.rpi_ids);
19801 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19802}
19803
19804/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @iocbq: data to load as mbox ctx_u information
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for the
 * rpi assigned to @ndlp, resuming I/O on that rpi with the port.
19812 **/
19813int
19814lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19815 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
19816 struct lpfc_iocbq *iocbq)
19817{
19818 LPFC_MBOXQ_t *mboxq;
19819 struct lpfc_hba *phba = ndlp->phba;
19820 int rc;
19821
	/* The port is notified of the rpi resume via a mailbox command. */
19823 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19824 if (!mboxq)
19825 return -ENOMEM;
19826
19827 /* If cmpl assigned, then this nlp_get pairs with
19828 * lpfc_mbx_cmpl_resume_rpi.
19829 *
19830 * Else cmpl is NULL, then this nlp_get pairs with
19831 * lpfc_sli_def_mbox_cmpl.
19832 */
19833 if (!lpfc_nlp_get(ndlp)) {
19834 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19835 "2122 %s: Failed to get nlp ref\n",
19836 __func__);
19837 mempool_free(mboxq, phba->mbox_mem_pool);
19838 return -EIO;
19839 }
19840
	/* Issue the RESUME_RPI mailbox command for this rpi. */
19842 lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_u.save_iocb = iocbq;
	} else {
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}
19848 mboxq->ctx_ndlp = ndlp;
19849 mboxq->vport = ndlp->vport;
19850 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19851 if (rc == MBX_NOT_FINISHED) {
19852 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19853 "2010 Resume RPI Mailbox failed "
19854 "status %d, mbxStatus x%x\n", rc,
19855 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19856 lpfc_nlp_put(ndlp);
19857 mempool_free(mboxq, phba->mbox_mem_pool);
19858 return -EIO;
19859 }
19860 return 0;
19861}
19862
19863/**
19864 * lpfc_sli4_init_vpi - Initialize a vpi with the port
19865 * @vport: Pointer to the vport for which the vpi is being initialized
19866 *
19867 * This routine is invoked to activate a vpi with the port.
19868 *
19869 * Returns:
19870 * 0 success
19871 * -Evalue otherwise
19872 **/
19873int
19874lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19875{
19876 LPFC_MBOXQ_t *mboxq;
19877 int rc = 0;
19878 int retval = MBX_SUCCESS;
19879 uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19882 if (!mboxq)
19883 return -ENOMEM;
19884 lpfc_init_vpi(phba, mboxq, vport->vpi);
19885 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19886 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19887 if (rc != MBX_SUCCESS) {
19888 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19889 "2022 INIT VPI Mailbox failed "
19890 "status %d, mbxStatus x%x\n", rc,
19891 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19892 retval = -EIO;
19893 }
19894 if (rc != MBX_TIMEOUT)
19895 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19896
19897 return retval;
19898}
19899
19900/**
19901 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19902 * @phba: pointer to lpfc hba data structure.
19903 * @mboxq: Pointer to mailbox object.
19904 *
 * This is the completion handler for the ADD_FCF_RECORD nonembedded mailbox
 * command. It checks the mailbox subheader status, logs a message on
 * failure, and frees the mailbox resources.
19908 **/
19909static void
19910lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19911{
19912 void *virt_addr;
19913 union lpfc_sli4_cfg_shdr *shdr;
19914 uint32_t shdr_status, shdr_add_status;
19915
19916 virt_addr = mboxq->sge_array->addr[0];
19917 /* The IOCTL status is embedded in the mailbox subheader. */
19918 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19919 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19920 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19921
19922 if ((shdr_status || shdr_add_status) &&
19923 (shdr_status != STATUS_FCF_IN_USE))
19924 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19925 "2558 ADD_FCF_RECORD mailbox failed with "
19926 "status x%x add_status x%x\n",
19927 shdr_status, shdr_add_status);
19928
19929 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19930}
19931
19932/**
19933 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19934 * @phba: pointer to lpfc hba data structure.
19935 * @fcf_record: pointer to the initialized fcf record to add.
19936 *
19937 * This routine is invoked to manually add a single FCF record. The caller
19938 * must pass a completely initialized FCF_Record. This routine takes
19939 * care of the nonembedded mailbox operations.
19940 **/
19941int
19942lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19943{
19944 int rc = 0;
19945 LPFC_MBOXQ_t *mboxq;
19946 uint8_t *bytep;
19947 void *virt_addr;
19948 struct lpfc_mbx_sge sge;
19949 uint32_t alloc_len, req_len;
19950 uint32_t fcfindex;
19951
19952 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19953 if (!mboxq) {
19954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19955 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19956 return -ENOMEM;
19957 }
19958
19959 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19960 sizeof(uint32_t);
19961
19962 /* Allocate DMA memory and set up the non-embedded mailbox command */
19963 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19964 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19965 req_len, LPFC_SLI4_MBX_NEMBED);
19966 if (alloc_len < req_len) {
19967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19968 "2523 Allocated DMA memory size (x%x) is "
19969 "less than the requested DMA memory "
19970 "size (x%x)\n", alloc_len, req_len);
19971 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19972 return -ENOMEM;
19973 }
19974
19975 /*
19976 * Get the first SGE entry from the non-embedded DMA memory. This
19977 * routine only uses a single SGE.
19978 */
19979 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19980 virt_addr = mboxq->sge_array->addr[0];
19981 /*
19982 * Configure the FCF record for FCFI 0. This is the driver's
19983	 * hardcoded default and is used in non-FIP mode.
19984 */
19985 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19986 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19987 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19988
19989 /*
19990 * Copy the fcf_index and the FCF Record Data. The data starts after
19991 * the FCoE header plus word10. The data copy needs to be endian
19992 * correct.
19993 */
19994 bytep += sizeof(uint32_t);
19995 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19996 mboxq->vport = phba->pport;
19997 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19998 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19999 if (rc == MBX_NOT_FINISHED) {
20000 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20001 "2515 ADD_FCF_RECORD mailbox failed with "
20002 "status 0x%x\n", rc);
20003 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20004 rc = -EIO;
20005 } else
20006 rc = 0;
20007
20008 return rc;
20009}
20010
20011/**
20012 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20013 * @phba: pointer to lpfc hba data structure.
20014 * @fcf_record: pointer to the fcf record to write the default data.
20015 * @fcf_index: FCF table entry index.
20016 *
20017 * This routine is invoked to build the driver's default FCF record. The
20018 * values used are hardcoded. This routine handles memory initialization.
20019 *
20020 **/
20021void
20022lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20023 struct fcf_record *fcf_record,
20024 uint16_t fcf_index)
20025{
20026 memset(fcf_record, 0, sizeof(struct fcf_record));
20027 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20028 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20029 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20030 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20031 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20032 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20033 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20034 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20035 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20036 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20037 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20038 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20039 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20040 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20041 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20042 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20043 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20044 /* Set the VLAN bit map */
20045 if (phba->valid_vlan) {
20046 fcf_record->vlan_bitmap[phba->vlan_id / 8]
20047 = 1 << (phba->vlan_id % 8);
20048 }
20049}
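
/*
 * Editor's sketch (illustrative only, not part of the driver): in non-FIP
 * mode the two routines above are typically paired -- build the hardcoded
 * default record, then write it to the port. The index name
 * LPFC_FCOE_FCF_DEF_INDEX is assumed here; any fcf_index within the table
 * dimension works the same way:
 *
 *	struct fcf_record fcf_record;
 *	int rc;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record,
 *					LPFC_FCOE_FCF_DEF_INDEX);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);
 */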
20050
20051/**
20052 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20053 * @phba: pointer to lpfc hba data structure.
20054 * @fcf_index: FCF table entry offset.
20055 *
20056 * This routine is invoked to scan the entire FCF table by reading FCF
20057 * records and processing them one at a time, starting from @fcf_index,
20058 * for initial FCF discovery or fast FCF failover rediscovery.
20059 *
20060 * Return 0 if the mailbox command is submitted successfully, non-zero
20061 * otherwise.
20062 **/
20063int
20064lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20065{
20066 int rc = 0, error;
20067 LPFC_MBOXQ_t *mboxq;
20068
20069 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20070 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20071 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20072 if (!mboxq) {
20073 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20074 "2000 Failed to allocate mbox for "
20075 "READ_FCF cmd\n");
20076 error = -ENOMEM;
20077 goto fail_fcf_scan;
20078 }
20079 /* Construct the read FCF record mailbox command */
20080 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20081 if (rc) {
20082 error = -EINVAL;
20083 goto fail_fcf_scan;
20084 }
20085 /* Issue the mailbox command asynchronously */
20086 mboxq->vport = phba->pport;
20087 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20088
20089 set_bit(FCF_TS_INPROG, &phba->hba_flag);
20090
20091 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20092 if (rc == MBX_NOT_FINISHED)
20093 error = -EIO;
20094 else {
20095 /* Reset eligible FCF count for new scan */
20096 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20097 phba->fcf.eligible_fcf_cnt = 0;
20098 error = 0;
20099 }
20100fail_fcf_scan:
20101 if (error) {
20102 if (mboxq)
20103 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20104 /* FCF scan failed, clear FCF_TS_INPROG flag */
20105 clear_bit(FCF_TS_INPROG, &phba->hba_flag);
20106 }
20107 return error;
20108}
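
/*
 * Editor's sketch (illustrative only, not part of the driver): an initial
 * scan of the whole table starts from the special first-record index;
 * subsequent records are walked from the completion handler:
 *
 *	int rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
 *						 LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		(scan not started; rc is -ENOMEM, -EINVAL or -EIO)
 *
 * Passing LPFC_FCOE_FCF_GET_FIRST also resets eligible_fcf_cnt, as seen
 * above.
 */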
20109
20110/**
20111 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20112 * @phba: pointer to lpfc hba data structure.
20113 * @fcf_index: FCF table entry offset.
20114 *
20115 * This routine is invoked to read an FCF record indicated by @fcf_index
20116 * and to use it for FLOGI roundrobin FCF failover.
20117 *
20118 * Return 0 if the mailbox command is submitted successfully, non-zero
20119 * otherwise.
20120 **/
20121int
20122lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20123{
20124 int rc = 0, error;
20125 LPFC_MBOXQ_t *mboxq;
20126
20127 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20128 if (!mboxq) {
20129 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20130 "2763 Failed to allocate mbox for "
20131 "READ_FCF cmd\n");
20132 error = -ENOMEM;
20133 goto fail_fcf_read;
20134 }
20135 /* Construct the read FCF record mailbox command */
20136 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20137 if (rc) {
20138 error = -EINVAL;
20139 goto fail_fcf_read;
20140 }
20141 /* Issue the mailbox command asynchronously */
20142 mboxq->vport = phba->pport;
20143 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20144 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20145 if (rc == MBX_NOT_FINISHED)
20146 error = -EIO;
20147 else
20148 error = 0;
20149
20150fail_fcf_read:
20151 if (error && mboxq)
20152 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20153 return error;
20154}
20155
20156/**
20157 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20158 * @phba: pointer to lpfc hba data structure.
20159 * @fcf_index: FCF table entry offset.
20160 *
20161 * This routine is invoked to read an FCF record indicated by @fcf_index to
20162 * determine whether it's eligible for the FLOGI roundrobin failover list.
20163 *
20164 * Return 0 if the mailbox command is submitted successfully, non-zero
20165 * otherwise.
20166 **/
20167int
20168lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20169{
20170 int rc = 0, error;
20171 LPFC_MBOXQ_t *mboxq;
20172
20173 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20174 if (!mboxq) {
20175 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20176 "2758 Failed to allocate mbox for "
20177 "READ_FCF cmd\n");
20178 error = -ENOMEM;
20179 goto fail_fcf_read;
20180 }
20181 /* Construct the read FCF record mailbox command */
20182 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20183 if (rc) {
20184 error = -EINVAL;
20185 goto fail_fcf_read;
20186 }
20187 /* Issue the mailbox command asynchronously */
20188 mboxq->vport = phba->pport;
20189 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20190 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20191 if (rc == MBX_NOT_FINISHED)
20192 error = -EIO;
20193 else
20194 error = 0;
20195
20196fail_fcf_read:
20197 if (error && mboxq)
20198 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20199 return error;
20200}
20201
20202/**
20203 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at next priority level
20204 * @phba: pointer to the lpfc_hba struct for this port.
20205 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20206 * routine when the rr_bmask is empty. The FCF indices are put into the
20207 * rr_bmask based on their priority level, starting from the highest
20208 * priority and working down to the lowest. The most likely FCF candidate
20209 * will be in the highest priority group. When this routine is called, it
20210 * searches the fcf_pri list for the next lowest priority group and
20211 * repopulates the rr_bmask with only those fcf_indexes.
20212 * Returns:
20213 * 1 = success, 0 = failure
20214 **/
20215static int
20216lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20217{
20218 uint16_t next_fcf_pri;
20219 uint16_t last_index;
20220 struct lpfc_fcf_pri *fcf_pri;
20221 int rc;
20222 int ret = 0;
20223
20224 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20225 LPFC_SLI4_FCF_TBL_INDX_MAX);
20226 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20227 "3060 Last IDX %d\n", last_index);
20228
20229 /* Verify the priority list has 2 or more entries */
20230 spin_lock_irq(&phba->hbalock);
20231 if (list_empty(&phba->fcf.fcf_pri_list) ||
20232 list_is_singular(&phba->fcf.fcf_pri_list)) {
20233 spin_unlock_irq(&phba->hbalock);
20234 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20235 "3061 Last IDX %d\n", last_index);
20236 return 0; /* Empty rr list */
20237 }
20238 spin_unlock_irq(&phba->hbalock);
20239
20240 next_fcf_pri = 0;
20241 /*
20242 * Clear the rr_bmask and set all of the bits that are at this
20243 * priority.
20244 */
20245 memset(phba->fcf.fcf_rr_bmask, 0,
20246 sizeof(*phba->fcf.fcf_rr_bmask));
20247 spin_lock_irq(&phba->hbalock);
20248 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20249 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20250 continue;
20251 /*
20252		 * The first priority that has not failed FLOGI
20253		 * will be the highest.
20254 */
20255 if (!next_fcf_pri)
20256 next_fcf_pri = fcf_pri->fcf_rec.priority;
20257 spin_unlock_irq(&phba->hbalock);
20258 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20259 rc = lpfc_sli4_fcf_rr_index_set(phba,
20260 fcf_pri->fcf_rec.fcf_index);
20261 if (rc)
20262 return 0;
20263 }
20264 spin_lock_irq(&phba->hbalock);
20265 }
20266 /*
20267	 * If next_fcf_pri was not set above and the list is not empty, then
20268	 * FLOGI has failed on all of the entries. Clear the FLOGI-failed
20269	 * flags and start over at the beginning.
20270 */
20271 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20272 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20273 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20274 /*
20275			 * The first priority that has not failed FLOGI
20276			 * will be the highest.
20277 */
20278 if (!next_fcf_pri)
20279 next_fcf_pri = fcf_pri->fcf_rec.priority;
20280 spin_unlock_irq(&phba->hbalock);
20281 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20282 rc = lpfc_sli4_fcf_rr_index_set(phba,
20283 fcf_pri->fcf_rec.fcf_index);
20284 if (rc)
20285 return 0;
20286 }
20287 spin_lock_irq(&phba->hbalock);
20288 }
20289 } else
20290 ret = 1;
20291 spin_unlock_irq(&phba->hbalock);
20292
20293 return ret;
20294}
20295/**
20296 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20297 * @phba: pointer to lpfc hba data structure.
20298 *
20299 * This routine gets the next eligible FCF record index in a round
20300 * robin fashion. If the next eligible FCF record index equals the
20301 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20302 * shall be returned, otherwise, the next eligible FCF record's index
20303 * shall be returned.
20304 **/
20305uint16_t
20306lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20307{
20308 uint16_t next_fcf_index;
20309
20310initial_priority:
20311 /* Search start from next bit of currently registered FCF index */
20312 next_fcf_index = phba->fcf.current_rec.fcf_indx;
20313
20314next_priority:
20315 /* Determine the next fcf index to check */
20316 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20317 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20318 LPFC_SLI4_FCF_TBL_INDX_MAX,
20319 next_fcf_index);
20320
20321 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20322 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20323 /*
20324 * If we have wrapped then we need to clear the bits that
20325 * have been tested so that we can detect when we should
20326 * change the priority level.
20327 */
20328 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20329 LPFC_SLI4_FCF_TBL_INDX_MAX);
20330 }
20331
20332
20333 /* Check roundrobin failover list empty condition */
20334 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20335 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20336 /*
20337		 * If the next fcf index is not found, check if there are lower
20338		 * priority level fcf's in the fcf_priority list.
20339		 * Set up the rr_bmask with all of the available fcf bits
20340		 * at that level and continue the selection process.
20341 */
20342 if (lpfc_check_next_fcf_pri_level(phba))
20343 goto initial_priority;
20344 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20345 "2844 No roundrobin failover FCF available\n");
20346
20347 return LPFC_FCOE_FCF_NEXT_NONE;
20348 }
20349
20350 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20351 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20352 LPFC_FCF_FLOGI_FAILED) {
20353 if (list_is_singular(&phba->fcf.fcf_pri_list))
20354 return LPFC_FCOE_FCF_NEXT_NONE;
20355
20356 goto next_priority;
20357 }
20358
20359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20360 "2845 Get next roundrobin failover FCF (x%x)\n",
20361 next_fcf_index);
20362
20363 return next_fcf_index;
20364}
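
/*
 * Editor's sketch (illustrative only, not part of the driver): a
 * roundrobin failover caller treats LPFC_FCOE_FCF_NEXT_NONE as "rotation
 * exhausted":
 *
 *	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		(no eligible FCF left; fail over or retry current FCF)
 *	else
 *		lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 *
 * The return value of lpfc_sli4_fcf_rr_read_fcf_rec() should of course
 * be checked in real code.
 */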
20365
20366/**
20367 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20368 * @phba: pointer to lpfc hba data structure.
20369 * @fcf_index: index into the FCF table to 'set'
20370 *
20371 * This routine sets the FCF record index into the eligible bmask for
20372 * roundrobin failover search. It checks to make sure that the index
20373 * does not go beyond the range of the driver allocated bmask dimension
20374 * before setting the bit.
20375 *
20376 * Returns 0 if the index bit is successfully set; otherwise, it returns
20377 * -EINVAL.
20378 **/
20379int
20380lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20381{
20382 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20383 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20384 "2610 FCF (x%x) reached driver's book "
20385 "keeping dimension:x%x\n",
20386 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20387 return -EINVAL;
20388 }
20389 /* Set the eligible FCF record index bmask */
20390 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20391
20392 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20393 "2790 Set FCF (x%x) to roundrobin FCF failover "
20394 "bmask\n", fcf_index);
20395
20396 return 0;
20397}
20398
20399/**
20400 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20401 * @phba: pointer to lpfc hba data structure.
20402 * @fcf_index: index into the FCF table to 'clear'
20403 *
20404 * This routine clears the FCF record index from the eligible bmask for
20405 * roundrobin failover search. It checks to make sure that the index
20406 * does not go beyond the range of the driver allocated bmask dimension
20407 * before clearing the bit.
20408 **/
20409void
20410lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20411{
20412 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20413 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20414 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20415 "2762 FCF (x%x) reached driver's book "
20416 "keeping dimension:x%x\n",
20417 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20418 return;
20419 }
20420 /* Clear the eligible FCF record index bmask */
20421 spin_lock_irq(&phba->hbalock);
20422 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20423 list) {
20424 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20425 list_del_init(&fcf_pri->list);
20426 break;
20427 }
20428 }
20429 spin_unlock_irq(&phba->hbalock);
20430 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20431
20432 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20433 "2791 Clear FCF (x%x) from roundrobin failover "
20434 "bmask\n", fcf_index);
20435}
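
/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * set/clear pair above maintains the eligibility bmask around FLOGI
 * attempts:
 *
 *	lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
 *	(... FLOGI through this FCF fails permanently ...)
 *	lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 *
 * Both routines range-check fcf_index against LPFC_SLI4_FCF_TBL_INDX_MAX
 * before touching fcf_rr_bmask.
 */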
20436
20437/**
20438 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20439 * @phba: pointer to lpfc hba data structure.
20440 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20441 *
20442 * This routine is the completion routine for the rediscover FCF table mailbox
20443 * command. If the mailbox command returned failure, it will try to stop the
20444 * FCF rediscover wait timer.
20445 **/
20446static void
20447lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20448{
20449 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20450 uint32_t shdr_status, shdr_add_status;
20451
20452 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20453
20454 shdr_status = bf_get(lpfc_mbox_hdr_status,
20455 &redisc_fcf->header.cfg_shdr.response);
20456 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20457 &redisc_fcf->header.cfg_shdr.response);
20458 if (shdr_status || shdr_add_status) {
20459 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20460 "2746 Requesting for FCF rediscovery failed "
20461 "status x%x add_status x%x\n",
20462 shdr_status, shdr_add_status);
20463 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20464 spin_lock_irq(&phba->hbalock);
20465 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20466 spin_unlock_irq(&phba->hbalock);
20467 /*
20468 * CVL event triggered FCF rediscover request failed,
20469 * last resort to re-try current registered FCF entry.
20470 */
20471 lpfc_retry_pport_discovery(phba);
20472 } else {
20473 spin_lock_irq(&phba->hbalock);
20474 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20475 spin_unlock_irq(&phba->hbalock);
20476 /*
20477 * DEAD FCF event triggered FCF rediscover request
20478 * failed, last resort to fail over as a link down
20479 * to FCF registration.
20480 */
20481 lpfc_sli4_fcf_dead_failthrough(phba);
20482 }
20483 } else {
20484 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20485 "2775 Start FCF rediscover quiescent timer\n");
20486 /*
20487 * Start FCF rediscovery wait timer for pending FCF
20488		 * before rescanning the FCF record table.
20489 */
20490 lpfc_fcf_redisc_wait_start_timer(phba);
20491 }
20492
20493 mempool_free(mbox, phba->mbox_mem_pool);
20494}
20495
20496/**
20497 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20498 * @phba: pointer to lpfc hba data structure.
20499 *
20500 * This routine is invoked to request rediscovery of the entire FCF table
20501 * by the port.
20502 **/
20503int
20504lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20505{
20506 LPFC_MBOXQ_t *mbox;
20507 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20508 int rc, length;
20509
20510 /* Cancel retry delay timers to all vports before FCF rediscover */
20511 lpfc_cancel_all_vport_retry_delay_timer(phba);
20512
20513 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20514 if (!mbox) {
20515 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20516 "2745 Failed to allocate mbox for "
20517 "requesting FCF rediscover.\n");
20518 return -ENOMEM;
20519 }
20520
20521 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20522 sizeof(struct lpfc_sli4_cfg_mhdr));
20523 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20524 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20525 length, LPFC_SLI4_MBX_EMBED);
20526
20527 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20528 /* Set count to 0 for invalidating the entire FCF database */
20529 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20530
20531 /* Issue the mailbox command asynchronously */
20532 mbox->vport = phba->pport;
20533 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20534 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20535
20536 if (rc == MBX_NOT_FINISHED) {
20537 mempool_free(mbox, phba->mbox_mem_pool);
20538 return -EIO;
20539 }
20540 return 0;
20541}
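
/*
 * Editor's sketch (illustrative only, not part of the driver):
 * rediscovery of the whole table is requested with a single call;
 * completion is handled asynchronously by
 * lpfc_mbx_cmpl_redisc_fcf_table() above:
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		(request not issued: -ENOMEM or -EIO)
 *
 * On success the rediscovery wait timer is started from the completion
 * handler before the FCF record table is rescanned.
 */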
20542
20543/**
20544 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20545 * @phba: pointer to lpfc hba data structure.
20546 *
20547 * This function is the failover routine as a last resort to the FCF DEAD
20548 * event when the driver has failed to perform fast FCF failover.
20549 **/
20550void
20551lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20552{
20553 uint32_t link_state;
20554
20555 /*
20556 * Last resort as FCF DEAD event failover will treat this as
20557 * a link down, but save the link state because we don't want
20558 * it to be changed to Link Down unless it is already down.
20559 */
20560 link_state = phba->link_state;
20561 lpfc_linkdown(phba);
20562 phba->link_state = link_state;
20563
20564 /* Unregister FCF if no devices connected to it */
20565 lpfc_unregister_unused_fcf(phba);
20566}
20567
20568/**
20569 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20570 * @phba: pointer to lpfc hba data structure.
20571 * @rgn23_data: pointer to configure region 23 data.
20572 *
20573 * This function gets SLI3 port config region 23 data through the memory dump
20574 * mailbox command. When it successfully retrieves data, the size of the data
20575 * will be returned, otherwise, 0 will be returned.
20576 **/
20577static uint32_t
20578lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20579{
20580 LPFC_MBOXQ_t *pmb = NULL;
20581 MAILBOX_t *mb;
20582 uint32_t offset = 0;
20583 int rc;
20584
20585 if (!rgn23_data)
20586 return 0;
20587
20588 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20589 if (!pmb) {
20590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20591 "2600 failed to allocate mailbox memory\n");
20592 return 0;
20593 }
20594 mb = &pmb->u.mb;
20595
20596 do {
20597 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20598 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20599
20600 if (rc != MBX_SUCCESS) {
20601 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20602 "2601 failed to read config "
20603 "region 23, rc 0x%x Status 0x%x\n",
20604 rc, mb->mbxStatus);
20605 mb->un.varDmp.word_cnt = 0;
20606 }
20607 /*
20608		 * Dump mem may return a zero word count when finished, or we
20609		 * got a mailbox error; either way we are done.
20610 */
20611 if (mb->un.varDmp.word_cnt == 0)
20612 break;
20613
20614 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20615 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20616
20617 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20618 rgn23_data + offset,
20619 mb->un.varDmp.word_cnt);
20620 offset += mb->un.varDmp.word_cnt;
20621 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20622
20623 mempool_free(pmb, phba->mbox_mem_pool);
20624 return offset;
20625}
20626
20627/**
20628 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20629 * @phba: pointer to lpfc hba data structure.
20630 * @rgn23_data: pointer to configure region 23 data.
20631 *
20632 * This function gets SLI4 port config region 23 data through the memory dump
20633 * mailbox command. When it successfully retrieves data, the size of the data
20634 * will be returned, otherwise, 0 will be returned.
20635 **/
20636static uint32_t
20637lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20638{
20639 LPFC_MBOXQ_t *mboxq = NULL;
20640 struct lpfc_dmabuf *mp = NULL;
20641 struct lpfc_mqe *mqe;
20642 uint32_t data_length = 0;
20643 int rc;
20644
20645 if (!rgn23_data)
20646 return 0;
20647
20648 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20649 if (!mboxq) {
20650 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20651 "3105 failed to allocate mailbox memory\n");
20652 return 0;
20653 }
20654
20655 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20656 goto out;
20657 mqe = &mboxq->u.mqe;
20658 mp = mboxq->ctx_buf;
20659 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20660 if (rc)
20661 goto out;
20662 data_length = mqe->un.mb_words[5];
20663 if (data_length == 0)
20664 goto out;
20665 if (data_length > DMP_RGN23_SIZE) {
20666 data_length = 0;
20667 goto out;
20668 }
20669 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20670out:
20671 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20672 return data_length;
20673}
20674
20675/**
20676 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20677 * @phba: pointer to lpfc hba data structure.
20678 *
20679 * This function reads region 23 and parses the TLV for port status to
20680 * decide if the user disabled the port. If the TLV indicates the
20681 * port is disabled, the hba_flag is set accordingly.
20682 **/
20683void
20684lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20685{
20686 uint8_t *rgn23_data = NULL;
20687 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20688 uint32_t offset = 0;
20689
20690 /* Get adapter Region 23 data */
20691 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20692 if (!rgn23_data)
20693 goto out;
20694
20695 if (phba->sli_rev < LPFC_SLI_REV4)
20696 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20697 else {
20698 if_type = bf_get(lpfc_sli_intf_if_type,
20699 &phba->sli4_hba.sli_intf);
20700 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20701 goto out;
20702 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20703 }
20704
20705 if (!data_size)
20706 goto out;
20707
20708 /* Check the region signature first */
20709 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20710 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20711 "2619 Config region 23 has bad signature\n");
20712 goto out;
20713 }
20714 offset += 4;
20715
20716 /* Check the data structure version */
20717 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20719 "2620 Config region 23 has bad version\n");
20720 goto out;
20721 }
20722 offset += 4;
20723
20724 /* Parse TLV entries in the region */
20725 while (offset < data_size) {
20726 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20727 break;
20728 /*
20729		 * If the TLV is not a driver specific TLV or the driver id is
20730		 * not the Linux driver id, skip the record.
20731 */
20732 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20733 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20734 (rgn23_data[offset + 3] != 0)) {
20735 offset += rgn23_data[offset + 1] * 4 + 4;
20736 continue;
20737 }
20738
20739 /* Driver found a driver specific TLV in the config region */
20740 sub_tlv_len = rgn23_data[offset + 1] * 4;
20741 offset += 4;
20742 tlv_offset = 0;
20743
20744 /*
20745 * Search for configured port state sub-TLV.
20746 */
20747 while ((offset < data_size) &&
20748 (tlv_offset < sub_tlv_len)) {
20749 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20750 offset += 4;
20751 tlv_offset += 4;
20752 break;
20753 }
20754 if (rgn23_data[offset] != PORT_STE_TYPE) {
20755				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20756				offset += rgn23_data[offset + 1] * 4 + 4;
20757 continue;
20758 }
20759
20760 /* This HBA contains PORT_STE configured */
20761 if (!rgn23_data[offset + 2])
20762 set_bit(LINK_DISABLED, &phba->hba_flag);
20763
20764 goto out;
20765 }
20766 }
20767
20768out:
20769 kfree(rgn23_data);
20770 return;
20771}
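
/*
 * Editor's note: a sketch of the region 23 layout assumed by the parser
 * above, reconstructed from its checks (not an authoritative format
 * specification):
 *
 *	bytes 0-3:  LPFC_REGION23_SIGNATURE
 *	byte  4:    LPFC_REGION23_VERSION (bytes 5-7 skipped)
 *	then TLVs:  [type][length in words][data ...] repeated until a
 *	            LPFC_REGION23_LAST_REC type is seen
 *
 * Within a DRIVER_SPECIFIC_TYPE TLV carrying LINUX_DRIVER_ID, a
 * PORT_STE_TYPE sub-TLV whose third byte is zero marks the port as
 * user-disabled, which sets LINK_DISABLED in hba_flag.
 */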
20772
20773/**
20774 * lpfc_log_fw_write_cmpl - logs firmware write completion status
20775 * @phba: pointer to lpfc hba data structure
20776 * @shdr_status: wr_object rsp's status field
20777 * @shdr_add_status: wr_object rsp's add_status field
20778 * @shdr_add_status_2: wr_object rsp's add_status_2 field
20779 * @shdr_change_status: wr_object rsp's change_status field
20780 * @shdr_csf: wr_object rsp's csf bit
20781 *
20782 * This routine is intended to be called after a firmware write completes.
20783 * It will log the next action items to be performed by the user to
20784 * instantiate the newly downloaded firmware, or the reason for incompatibility.
20785 **/
20786static void
20787lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20788 u32 shdr_add_status, u32 shdr_add_status_2,
20789 u32 shdr_change_status, u32 shdr_csf)
20790{
20791 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20792 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20793 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20794 "change_status x%02x, csf %01x\n", __func__,
20795 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20796 shdr_status, shdr_add_status, shdr_add_status_2,
20797 shdr_change_status, shdr_csf);
20798
20799 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20800 switch (shdr_add_status_2) {
20801 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20802 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20803 "4199 Firmware write failed: "
20804 "image incompatible with flash x%02x\n",
20805 phba->sli4_hba.flash_id);
20806 break;
20807 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20808 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20809 "4200 Firmware write failed: "
20810 "image incompatible with ASIC "
20811 "architecture x%02x\n",
20812 phba->sli4_hba.asic_rev);
20813 break;
20814 default:
20815 lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20816 "4210 Firmware write failed: "
20817 "add_status_2 x%02x\n",
20818 shdr_add_status_2);
20819 break;
20820 }
20821 } else if (!shdr_status && !shdr_add_status) {
20822 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20823 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20824 if (shdr_csf)
20825 shdr_change_status =
20826 LPFC_CHANGE_STATUS_PCI_RESET;
20827 }
20828
20829 switch (shdr_change_status) {
20830 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20831 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20832 "3198 Firmware write complete: System "
20833 "reboot required to instantiate\n");
20834 break;
20835 case (LPFC_CHANGE_STATUS_FW_RESET):
20836 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20837 "3199 Firmware write complete: "
20838 "Firmware reset required to "
20839 "instantiate\n");
20840 break;
20841 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20842 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20843 "3200 Firmware write complete: Port "
20844 "Migration or PCI Reset required to "
20845 "instantiate\n");
20846 break;
20847 case (LPFC_CHANGE_STATUS_PCI_RESET):
20848 lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20849 "3201 Firmware write complete: PCI "
20850 "Reset required to instantiate\n");
20851 break;
20852 default:
20853 break;
20854 }
20855 }
20856}
20857
20858/**
20859 * lpfc_wr_object - write an object to the firmware
20860 * @phba: HBA structure that indicates port to create a queue on.
20861 * @dmabuf_list: list of dmabufs to write to the port.
20862 * @size: the total byte value of the objects to write to the port.
20863 * @offset: the current offset to be used to start the transfer.
20864 *
20865 * This routine will create a wr_object mailbox command to send to the port.
20866 * The mailbox command will be constructed using the dma buffers described in
20867 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20868 * BDEs as the embedded mailbox can support. The @offset variable will be
20869 * used to indicate the starting offset of the transfer and will also return
20870 * the offset after the write object mailbox has completed. @size is used to
20871 * determine the end of the object and whether the eof bit should be set.
20872 *
20873 * Return 0 if successful; @offset will contain the new offset to use
20874 * for the next write.
20875 * Return negative value for error cases.
20876 **/
20877int
20878lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20879 uint32_t size, uint32_t *offset)
20880{
20881 struct lpfc_mbx_wr_object *wr_object;
20882 LPFC_MBOXQ_t *mbox;
20883 int rc = 0, i = 0;
20884 int mbox_status = 0;
20885 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20886 uint32_t shdr_change_status = 0, shdr_csf = 0;
20887 uint32_t mbox_tmo;
20888 struct lpfc_dmabuf *dmabuf;
20889 uint32_t written = 0;
20890 bool check_change_status = false;
20891
20892 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20893 if (!mbox)
20894 return -ENOMEM;
20895
20896 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20897 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20898 sizeof(struct lpfc_mbx_wr_object) -
20899 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20900
20901 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20902 wr_object->u.request.write_offset = *offset;
20903 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20904 wr_object->u.request.object_name[0] =
20905 cpu_to_le32(wr_object->u.request.object_name[0]);
20906 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20907 list_for_each_entry(dmabuf, dmabuf_list, list) {
20908 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20909 break;
20910 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20911 wr_object->u.request.bde[i].addrHigh =
20912 putPaddrHigh(dmabuf->phys);
20913 if (written + SLI4_PAGE_SIZE >= size) {
20914 wr_object->u.request.bde[i].tus.f.bdeSize =
20915 (size - written);
20916 written += (size - written);
20917 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20918 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20919 check_change_status = true;
20920 } else {
20921 wr_object->u.request.bde[i].tus.f.bdeSize =
20922 SLI4_PAGE_SIZE;
20923 written += SLI4_PAGE_SIZE;
20924 }
20925 i++;
20926 }
20927 wr_object->u.request.bde_count = i;
20928 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20929 if (!phba->sli4_hba.intr_enable)
20930 mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20931 else {
20932 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20933 mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20934 }
20935
20936 /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
20937 rc = mbox_status;
20938
20939 /* The IOCTL status is embedded in the mailbox subheader. */
20940 shdr_status = bf_get(lpfc_mbox_hdr_status,
20941 &wr_object->header.cfg_shdr.response);
20942 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20943 &wr_object->header.cfg_shdr.response);
20944 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20945 &wr_object->header.cfg_shdr.response);
20946 if (check_change_status) {
20947 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20948 &wr_object->u.response);
20949 shdr_csf = bf_get(lpfc_wr_object_csf,
20950 &wr_object->u.response);
20951 }
20952
20953 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20955 "3025 Write Object mailbox failed with "
20956 "status x%x add_status x%x, add_status_2 x%x, "
20957 "mbx status x%x\n",
20958 shdr_status, shdr_add_status, shdr_add_status_2,
20959 rc);
20960 rc = -ENXIO;
20961 *offset = shdr_add_status;
20962 } else {
20963 *offset += wr_object->u.response.actual_write_length;
20964 }
20965
20966 if (rc || check_change_status)
20967 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20968 shdr_add_status_2, shdr_change_status,
20969 shdr_csf);
20970
20971 if (!phba->sli4_hba.intr_enable)
20972 mempool_free(mbox, phba->mbox_mem_pool);
20973 else if (mbox_status != MBX_TIMEOUT)
20974 mempool_free(mbox, phba->mbox_mem_pool);
20975
20976 return rc;
20977}
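
/*
 * Editor's sketch (illustrative only, not part of the driver): an object
 * larger than one mailbox worth of BDEs is written by looping on
 * lpfc_wr_object(), which advances *offset after each successful call.
 * fw_size and dma_buffer_list are hypothetical names, and refilling
 * dma_buffer_list with the next chunk's buffers between calls is elided:
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size, &offset);
 *
 * The eof bit is set automatically on the call that reaches @size.
 */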
20978
20979/**
20980 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20981 * @vport: pointer to vport data structure.
20982 *
20983 * This function iterates through the mailboxq and cleans up all REG_LOGIN
20984 * and REG_VPI mailbox commands associated with the vport. This function
20985 * is called when the driver wants to restart discovery of the vport due to
20986 * a Clear Virtual Link event.
20987 **/
20988void
20989lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20990{
20991 struct lpfc_hba *phba = vport->phba;
20992 LPFC_MBOXQ_t *mb, *nextmb;
20993 struct lpfc_nodelist *ndlp;
20994 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20995 LIST_HEAD(mbox_cmd_list);
20996 uint8_t restart_loop;
20997
20998 /* Clean up internally queued mailbox commands with the vport */
20999 spin_lock_irq(&phba->hbalock);
21000 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
21001 if (mb->vport != vport)
21002 continue;
21003
21004 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21005 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21006 continue;
21007
21008 list_move_tail(&mb->list, &mbox_cmd_list);
21009 }
21010 /* Clean up active mailbox command with the vport */
21011 mb = phba->sli.mbox_active;
21012 if (mb && (mb->vport == vport)) {
21013 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21014 (mb->u.mb.mbxCommand == MBX_REG_VPI))
21015 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21016 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21017 act_mbx_ndlp = mb->ctx_ndlp;
21018
21019 /* This reference is local to this routine. The
21020 * reference is removed at routine exit.
21021 */
21022 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21023
21024 /* Unregister the RPI when mailbox complete */
21025 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21026 }
21027 }
21028 /* Cleanup any mailbox completions which are not yet processed */
21029 do {
21030 restart_loop = 0;
21031 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21032 /*
21033			 * If this mailbox is already processed or it is
21034			 * for another vport, ignore it.
21035 */
21036 if ((mb->vport != vport) ||
21037 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21038 continue;
21039
21040 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21041 (mb->u.mb.mbxCommand != MBX_REG_VPI))
21042 continue;
21043
21044 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21045 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21046 ndlp = mb->ctx_ndlp;
21047 /* Unregister the RPI when mailbox complete */
21048 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21049 restart_loop = 1;
21050 spin_unlock_irq(&phba->hbalock);
21051 spin_lock(&ndlp->lock);
21052 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21053 spin_unlock(&ndlp->lock);
21054 spin_lock_irq(&phba->hbalock);
21055 break;
21056 }
21057 }
21058 } while (restart_loop);
21059
21060 spin_unlock_irq(&phba->hbalock);
21061
21062 /* Release the cleaned-up mailbox commands */
21063 while (!list_empty(&mbox_cmd_list)) {
21064 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21065 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21066 ndlp = mb->ctx_ndlp;
21067 mb->ctx_ndlp = NULL;
21068 if (ndlp) {
21069 spin_lock(&ndlp->lock);
21070 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21071 spin_unlock(&ndlp->lock);
21072 lpfc_nlp_put(ndlp);
21073 }
21074 }
21075 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
21076 }
21077
21078 /* Release the ndlp with the cleaned-up active mailbox command */
21079 if (act_mbx_ndlp) {
21080 spin_lock(&act_mbx_ndlp->lock);
21081 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21082 spin_unlock(&act_mbx_ndlp->lock);
21083 lpfc_nlp_put(act_mbx_ndlp);
21084 }
21085}
21086
21087/**
21088 * lpfc_drain_txq - Drain the txq
21089 * @phba: Pointer to HBA context object.
21090 *
21091 * This function attempts to submit IOCBs on the txq
21092 * to the adapter. For SLI4 adapters, the txq contains
21093 * ELS IOCBs that have been deferred because there
21094 * are no available SGLs. This congestion can occur with large
21095 * vport counts during node discovery.
21096 **/
21097
21098uint32_t
21099lpfc_drain_txq(struct lpfc_hba *phba)
21100{
21101 LIST_HEAD(completions);
21102 struct lpfc_sli_ring *pring;
21103 struct lpfc_iocbq *piocbq = NULL;
21104 unsigned long iflags = 0;
21105 char *fail_msg = NULL;
21106 uint32_t txq_cnt = 0;
21107 struct lpfc_queue *wq;
21108 int ret = 0;
21109
21110 if (phba->link_flag & LS_MDS_LOOPBACK) {
21111		/* MDS WQEs are posted only to the first WQ */
21112 wq = phba->sli4_hba.hdwq[0].io_wq;
21113 if (unlikely(!wq))
21114 return 0;
21115 pring = wq->pring;
21116 } else {
21117 wq = phba->sli4_hba.els_wq;
21118 if (unlikely(!wq))
21119 return 0;
21120 pring = lpfc_phba_elsring(phba);
21121 }
21122
21123 if (unlikely(!pring) || list_empty(&pring->txq))
21124 return 0;
21125
21126 spin_lock_irqsave(&pring->ring_lock, iflags);
21127 list_for_each_entry(piocbq, &pring->txq, list) {
21128 txq_cnt++;
21129 }
21130
21131 if (txq_cnt > pring->txq_max)
21132 pring->txq_max = txq_cnt;
21133
21134 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21135
21136 while (!list_empty(&pring->txq)) {
21137 spin_lock_irqsave(&pring->ring_lock, iflags);
21138
21139 piocbq = lpfc_sli_ringtx_get(phba, pring);
21140 if (!piocbq) {
21141 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21143 "2823 txq empty and txq_cnt is %d\n ",
21144 txq_cnt);
21145 break;
21146 }
21147 txq_cnt--;
21148
21149 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21150
21151 if (ret && ret != IOCB_BUSY) {
21152 fail_msg = " - Cannot send IO ";
21153 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21154 }
21155 if (fail_msg) {
21156 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21157 /* Failed means we can't issue and need to cancel */
21158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21159 "2822 IOCB failed %s iotag 0x%x "
21160 "xri 0x%x %d flg x%x\n",
21161 fail_msg, piocbq->iotag,
21162 piocbq->sli4_xritag, ret,
21163 piocbq->cmd_flag);
21164 list_add_tail(&piocbq->list, &completions);
21165 fail_msg = NULL;
21166 }
21167 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21168 if (txq_cnt == 0 || ret == IOCB_BUSY)
21169 break;
21170 }
21171 /* Cancel all the IOCBs that cannot be issued */
21172 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21173 IOERR_SLI_ABORTED);
21174
21175 return txq_cnt;
21176}
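
/*
 * Editor's note (illustrative only, not part of the driver): a typical
 * trigger for this routine is an SGL being freed, e.g. on an ELS
 * completion, so that deferred ELS IOCBs can make forward progress:
 *
 *	if (!list_empty(&pring->txq))
 *		lpfc_drain_txq(phba);
 *
 * The return value is the number of IOCBs still on the txq when the
 * drain attempt stopped.
 */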
21177
21178/**
21179 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21180 * @phba: Pointer to HBA context object.
21181 * @pwqeq: Pointer to command WQE.
21182 * @sglq: Pointer to the scatter gather queue object.
21183 *
21184 * This routine converts the bpl or bde that is in the WQE
21185 * to a sgl list for the sli4 hardware. The physical address
21186 * of the bpl/bde is converted back to a virtual address.
21187 * If the WQE contains a BPL then the list of BDEs is
21188 * converted to sli4_sges. If the WQE contains a single
21189 * BDE then it is converted to a single sli4_sge.
21190 * The WQE is still in cpu endianness so the contents of
21191 * the bpl can be used without byte swapping.
21192 *
21193 * Returns valid XRI = Success, NO_XRI = Failure.
21194 */
21195static uint16_t
21196lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21197 struct lpfc_sglq *sglq)
21198{
21199 uint16_t xritag = NO_XRI;
21200 struct ulp_bde64 *bpl = NULL;
21201 struct ulp_bde64 bde;
21202 struct sli4_sge *sgl = NULL;
21203 struct lpfc_dmabuf *dmabuf;
21204 union lpfc_wqe128 *wqe;
21205 int numBdes = 0;
21206 int i = 0;
21207 uint32_t offset = 0; /* accumulated offset in the sg request list */
21208 int inbound = 0; /* number of sg reply entries inbound from firmware */
21209 uint32_t cmd;
21210
21211 if (!pwqeq || !sglq)
21212 return xritag;
21213
21214 sgl = (struct sli4_sge *)sglq->sgl;
21215 wqe = &pwqeq->wqe;
21216 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21217
21218 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21219 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21220 return sglq->sli4_xritag;
21221 numBdes = pwqeq->num_bdes;
21222 if (numBdes) {
21223 /* The addrHigh and addrLow fields within the WQE
21224 * have not been byteswapped yet so there is no
21225 * need to swap them back.
21226 */
21227 if (pwqeq->bpl_dmabuf)
21228 dmabuf = pwqeq->bpl_dmabuf;
21229 else
21230 return xritag;
21231
21232 bpl = (struct ulp_bde64 *)dmabuf->virt;
21233 if (!bpl)
21234 return xritag;
21235
21236 for (i = 0; i < numBdes; i++) {
21237 /* Should already be byte swapped. */
21238 sgl->addr_hi = bpl->addrHigh;
21239 sgl->addr_lo = bpl->addrLow;
21240
21241 sgl->word2 = le32_to_cpu(sgl->word2);
21242 if ((i+1) == numBdes)
21243 bf_set(lpfc_sli4_sge_last, sgl, 1);
21244 else
21245 bf_set(lpfc_sli4_sge_last, sgl, 0);
21246 /* swap the size field back to the cpu so we
21247 * can assign it to the sgl.
21248 */
21249 bde.tus.w = le32_to_cpu(bpl->tus.w);
21250 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21251 /* The offsets in the sgl need to be accumulated
21252 * separately for the request and reply lists.
21253 * The request is always first, the reply follows.
21254 */
21255 switch (cmd) {
21256 case CMD_GEN_REQUEST64_WQE:
21257 /* add up the reply sg entries */
21258 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21259 inbound++;
21260 /* first inbound? reset the offset */
21261 if (inbound == 1)
21262 offset = 0;
21263 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21264 bf_set(lpfc_sli4_sge_type, sgl,
21265 LPFC_SGE_TYPE_DATA);
21266 offset += bde.tus.f.bdeSize;
21267 break;
21268 case CMD_FCP_TRSP64_WQE:
21269 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21270 bf_set(lpfc_sli4_sge_type, sgl,
21271 LPFC_SGE_TYPE_DATA);
21272 break;
21273 case CMD_FCP_TSEND64_WQE:
21274 case CMD_FCP_TRECEIVE64_WQE:
21275 bf_set(lpfc_sli4_sge_type, sgl,
21276 bpl->tus.f.bdeFlags);
21277 if (i < 3)
21278 offset = 0;
21279 else
21280 offset += bde.tus.f.bdeSize;
21281 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21282 break;
21283 }
21284 sgl->word2 = cpu_to_le32(sgl->word2);
21285 bpl++;
21286 sgl++;
21287 }
21288 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21289 /* The addrHigh and addrLow fields of the BDE have not
21290 * been byteswapped yet so they need to be swapped
21291 * before putting them in the sgl.
21292 */
21293 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21294 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21295 sgl->word2 = le32_to_cpu(sgl->word2);
21296 bf_set(lpfc_sli4_sge_last, sgl, 1);
21297 sgl->word2 = cpu_to_le32(sgl->word2);
21298 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21299 }
21300 return sglq->sli4_xritag;
21301}
21302
21303/**
21304 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21305 * @phba: Pointer to HBA context object.
21306 * @qp: Pointer to HDW queue.
21307 * @pwqe: Pointer to command WQE.
21308 **/
21309int
21310lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21311 struct lpfc_iocbq *pwqe)
21312{
21313 union lpfc_wqe128 *wqe = &pwqe->wqe;
21314 struct lpfc_async_xchg_ctx *ctxp;
21315 struct lpfc_queue *wq;
21316 struct lpfc_sglq *sglq;
21317 struct lpfc_sli_ring *pring;
21318 unsigned long iflags;
21319 uint32_t ret = 0;
21320
21321 /* NVME_LS and NVME_LS ABTS requests. */
21322 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21323 pring = phba->sli4_hba.nvmels_wq->pring;
21324 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21325 qp, wq_access);
21326 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21327 if (!sglq) {
21328 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21329 return WQE_BUSY;
21330 }
21331 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21332 pwqe->sli4_xritag = sglq->sli4_xritag;
21333 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21334 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21335 return WQE_ERROR;
21336 }
21337 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21338 pwqe->sli4_xritag);
21339 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21340 if (ret) {
21341 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21342 return ret;
21343 }
21344
21345 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21346 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21347
21348 lpfc_sli4_poll_eq(qp->hba_eq);
21349 return 0;
21350 }
21351
21352 /* NVME_FCREQ and NVME_ABTS requests */
21353 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21354 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21355 wq = qp->io_wq;
21356 pring = wq->pring;
21357
21358 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21359
21360 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21361 qp, wq_access);
21362 ret = lpfc_sli4_wq_put(wq, wqe);
21363 if (ret) {
21364 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21365 return ret;
21366 }
21367 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21368 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21369
21370 lpfc_sli4_poll_eq(qp->hba_eq);
21371 return 0;
21372 }
21373
21374 /* NVMET requests */
21375 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21376 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21377 wq = qp->io_wq;
21378 pring = wq->pring;
21379
21380 ctxp = pwqe->context_un.axchg;
21381 sglq = ctxp->ctxbuf->sglq;
21382 if (pwqe->sli4_xritag == NO_XRI) {
21383 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21384 pwqe->sli4_xritag = sglq->sli4_xritag;
21385 }
21386 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21387 pwqe->sli4_xritag);
21388 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21389
21390 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21391 qp, wq_access);
21392 ret = lpfc_sli4_wq_put(wq, wqe);
21393 if (ret) {
21394 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21395 return ret;
21396 }
21397 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21398 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21399
21400 lpfc_sli4_poll_eq(qp->hba_eq);
21401 return 0;
21402 }
21403 return WQE_ERROR;
21404}
21405
21406/**
21407 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21408 * @phba: Pointer to HBA context object.
21409 * @cmdiocb: Pointer to driver command iocb object.
21410 * @cmpl: completion function.
21411 *
21412 * Fill the appropriate fields for the abort WQE and call
21413 * the internal routine lpfc_sli4_issue_wqe to send the WQE.
21414 * This function is called with hbalock held and no ring_lock held.
21415 *
21416 * RETURNS 0 - SUCCESS
21417 **/
21418
21419int
21420lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21421 void *cmpl)
21422{
21423 struct lpfc_vport *vport = cmdiocb->vport;
21424 struct lpfc_iocbq *abtsiocb = NULL;
21425 union lpfc_wqe128 *abtswqe;
21426 struct lpfc_io_buf *lpfc_cmd;
21427 int retval = IOCB_ERROR;
21428 u16 xritag = cmdiocb->sli4_xritag;
21429
21430 /*
21431	 * The scsi command cannot be in the txq, and it is in flight because
21432	 * pCmd is still pointing at the SCSI command we have to abort. There
21433 * is no need to search the txcmplq. Just send an abort to the FW.
21434 */
21435
21436 abtsiocb = __lpfc_sli_get_iocbq(phba);
21437 if (!abtsiocb)
21438 return WQE_NORESOURCE;
21439
21440 /* Indicate the IO is being aborted by the driver. */
21441 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21442
21443 abtswqe = &abtsiocb->wqe;
21444 memset(abtswqe, 0, sizeof(*abtswqe));
21445
21446 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21447 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21448 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21449 abtswqe->abort_cmd.rsrvd5 = 0;
21450 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21451 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21452 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21453 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21454 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21455 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21456 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21457
21458 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21459 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21460 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21461 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21462 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21463 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21464 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21465 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21466 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21467 abtsiocb->vport = vport;
21468 abtsiocb->cmd_cmpl = cmpl;
21469
21470 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21471 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21472
21473 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21474 "0359 Abort xri x%x, original iotag x%x, "
21475 "abort cmd iotag x%x retval x%x\n",
21476 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21477
21478 if (retval) {
21479 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21480 __lpfc_sli_release_iocbq(phba, abtsiocb);
21481 }
21482
21483 return retval;
21484}
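
/*
 * Editor's sketch (illustrative only, not part of the driver): an abort
 * caller supplies its own completion handler; my_abort_cmpl is a
 * hypothetical name with the usual cmd_cmpl signature:
 *
 *	void my_abort_cmpl(struct lpfc_hba *phba,
 *			   struct lpfc_iocbq *cmdiocb,
 *			   struct lpfc_iocbq *rspiocb);
 *	...
 *	retval = lpfc_sli4_issue_abort_iotag(phba, iocb, my_abort_cmpl);
 *
 * A non-zero retval means the abort WQE was not issued and
 * LPFC_DRIVER_ABORTED has already been cleared on the command.
 */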
21485
21486#ifdef LPFC_MXP_STAT
21487/**
21488 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21489 * @phba: pointer to lpfc hba data structure.
21490 * @hwqid: index of the HWQ to snapshot.
21491 *
21492 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21493 * 15 seconds after a test case starts running.
21494 *
21495 * The user should call lpfc_debugfs_multixripools_write before running a test
21496 * case to clear stat_snapshot_taken. Then the user starts a test case. While
21497 * the test case is running, stat_snapshot_taken is incremented by 1 each time
21498 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21499 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21500 **/
21501void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21502{
21503 struct lpfc_sli4_hdw_queue *qp;
21504 struct lpfc_multixri_pool *multixri_pool;
21505 struct lpfc_pvt_pool *pvt_pool;
21506 struct lpfc_pbl_pool *pbl_pool;
21507 u32 txcmplq_cnt;
21508
21509 qp = &phba->sli4_hba.hdwq[hwqid];
21510 multixri_pool = qp->p_multixri_pool;
21511 if (!multixri_pool)
21512 return;
21513
21514 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21515 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21516 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21517 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21518
21519 multixri_pool->stat_pbl_count = pbl_pool->count;
21520 multixri_pool->stat_pvt_count = pvt_pool->count;
21521 multixri_pool->stat_busy_count = txcmplq_cnt;
21522 }
21523
21524 multixri_pool->stat_snapshot_taken++;
21525}
21526#endif
21527
21528/**
21529 * lpfc_adjust_pvt_pool_count - Adjust private pool count
21530 * @phba: pointer to lpfc hba data structure.
21531 * @hwqid: index of the HWQ to adjust.
21532 *
21533 * This routine moves some XRIs from private to public pool when private pool
21534 * is not busy.
21535 **/
21536void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21537{
21538 struct lpfc_multixri_pool *multixri_pool;
21539 u32 io_req_count;
21540 u32 prev_io_req_count;
21541
21542 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21543 if (!multixri_pool)
21544 return;
21545 io_req_count = multixri_pool->io_req_count;
21546 prev_io_req_count = multixri_pool->prev_io_req_count;
21547
21548 if (prev_io_req_count != io_req_count) {
21549 /* Private pool is busy */
21550 multixri_pool->prev_io_req_count = io_req_count;
21551 } else {
21552 /* Private pool is not busy.
21553 * Move XRIs from private to public pool.
21554 */
21555 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21556 }
21557}
21558
21559/**
21560 * lpfc_adjust_high_watermark - Adjust high watermark
21561 * @phba: pointer to lpfc hba data structure.
21562 * @hwqid: index of the HWQ to adjust.
21563 *
21564 * This routine sets the high watermark to the number of outstanding XRIs,
21565 * clamped so that the new value is between xri_limit/2 and xri_limit.
21566 **/
21567void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21568{
21569 u32 new_watermark;
21570 u32 watermark_max;
21571 u32 watermark_min;
21572 u32 xri_limit;
21573 u32 txcmplq_cnt;
21574 u32 abts_io_bufs;
21575 struct lpfc_multixri_pool *multixri_pool;
21576 struct lpfc_sli4_hdw_queue *qp;
21577
21578 qp = &phba->sli4_hba.hdwq[hwqid];
21579 multixri_pool = qp->p_multixri_pool;
21580 if (!multixri_pool)
21581 return;
21582 xri_limit = multixri_pool->xri_limit;
21583
21584 watermark_max = xri_limit;
21585 watermark_min = xri_limit / 2;
21586
21587 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21588 abts_io_bufs = qp->abts_scsi_io_bufs;
21589 abts_io_bufs += qp->abts_nvme_io_bufs;
21590
21591 new_watermark = txcmplq_cnt + abts_io_bufs;
21592 new_watermark = min(watermark_max, new_watermark);
21593 new_watermark = max(watermark_min, new_watermark);
21594 multixri_pool->pvt_pool.high_watermark = new_watermark;
21595
21596#ifdef LPFC_MXP_STAT
21597 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21598 new_watermark);
21599#endif
21600}
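
/*
 * Illustrative note (compiled out): the min()/max() pair above is a clamp of
 * "XRIs currently owned" into [xri_limit / 2, xri_limit]; with the kernel's
 * clamp() helper the same update could be written as:
 */
#if 0
	multixri_pool->pvt_pool.high_watermark =
		clamp(txcmplq_cnt + abts_io_bufs, xri_limit / 2, xri_limit);
#endif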

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the pool belongs to.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from the private to the public pool on hwqid in
 * two steps. The first step moves (all - low_watermark) XRIs.
 * The second step moves the rest of the XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}
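
/*
 * Illustrative sketch (compiled out; hypothetical names): the splice dance
 * above keeps the first low_watermark entries private and donates the rest
 * in O(low_watermark) time, never walking the possibly long tail. A minimal
 * model over bare list_heads:
 */
#if 0
static void donate_all_but(struct list_head *pvt, struct list_head *pbl,
			   u32 keep)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *n;
	u32 kept = 0;

	/* peel off the entries to keep */
	list_for_each_safe(pos, n, pvt) {
		if (kept++ >= keep)
			break;
		list_move_tail(pos, &tmp);
	}
	list_splice_init(pvt, pbl);	/* donate the remainder */
	list_splice(&tmp, pvt);		/* restore the kept entries */
}
#endif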

/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified
 * pbl_pool to the specified pvt_pool. It might move fewer than count XRIs
 * if there are not enough in the public pool.
 *
 * Return:
 * true - if XRIs are successfully moved from the specified pbl_pool to the
 * specified pvt_pool
 * false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}
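
/*
 * Illustrative skeleton (compiled out; hypothetical names): the trylock
 * above is the point of the routine. A contended donor pool is treated the
 * same as an empty one, so a CPU never spins on another HWQ's lock; the
 * round-robin caller simply tries the next candidate:
 */
#if 0
	unsigned long flags;

	if (!spin_trylock_irqsave(&donor->lock, flags))
		return false;	/* contended: caller moves on */
	/* ... move up to count entries while the lock is held ... */
	spin_unlock_irqrestore(&donor->lock, flags);
	return true;
#endif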

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the pool belongs to.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public
 * pools with a round-robin method. The search starts at the local hwqid,
 * then continues from the HWQ found last time (rrb_next_hwqid). Once a
 * public pool with free bufs is found, a batch of free common bufs is moved
 * to the private pool on hwqid. It might move fewer than count XRIs if
 * there are not enough in the public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
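
/*
 * Illustrative sketch (compiled out; hypothetical names): the donor search
 * above is a round-robin scan with a persistent cursor. Starting one past
 * the saved cursor and stopping once the scan wraps back to it guarantees
 * each HWQ is probed at most once per call:
 */
#if 0
static int rr_find_donor(u32 *cursor, u32 nr_hwq, bool (*try_hwq)(u32 hwqid))
{
	u32 next = *cursor;
	bool found;

	do {
		next = (next + 1) % nr_hwq;	/* wrap at the last HWQ */
		found = try_hwq(next);
	} while (!found && next != *cursor);

	*cursor = next;		/* resume from here next time */
	return found ? (int)next : -1;
}
#endif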

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ the pool belongs to.
 *
 * This routine gets a batch of XRIs from pbl_pool when pvt_pool is below its
 * low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: pointer to the HWQ the IO buf belongs to.
 *
 * This routine returns one IO buf back to the free pool. If this is an
 * urgent IO, the IO buf is returned to the expedite pool. If
 * cfg_xri_rebalancing==1, the IO buf is returned to pbl_pool or pvt_pool
 * based on watermark and xri_limit. If cfg_xri_rebalancing==0, the IO buf
 * is returned to lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
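
/*
 * Illustrative predicate (compiled out; hypothetical name): the routing
 * above refills the private pool while it sits below its low watermark, or
 * while the HWQ owns fewer XRIs than xri_limit and the private pool is
 * still below its high watermark; everything else goes back to the shared
 * public pool:
 */
#if 0
static bool release_to_private(u32 pvt_count, u32 low_wm, u32 high_wm,
			       u32 xri_owned, u32 xri_limit)
{
	return pvt_count < low_wm ||
	       (xri_owned < xri_limit && pvt_count < high_wm);
}
#endif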

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 * pointer to one free IO buf - if private pool is not empty
 * NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 * pointer to one free IO buf - if expedite pool is not empty
 * NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(iter, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&iter->list);
			epd_pool->count--;
			lpfc_ncmd = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ the pool belongs to
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 * private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 * get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 * The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 * pointer to one free IO buf - on success
 * NULL - if no IO buf is available
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	if (!qp) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5556 NULL qp for hwqid x%x\n", hwqid);
		return lpfc_ncmd;
	}
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5557 NULL multixri for hwqid x%x\n", hwqid);
		return lpfc_ncmd;
	}
	pvt_pool = &multixri_pool->pvt_pool;
	if (!pvt_pool) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5558 NULL pvt_pool for hwqid x%x\n", hwqid);
		return lpfc_ncmd;
	}
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ the buffer is taken from
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If
 * cfg_xri_rebalancing==1, it removes an IO buffer from the multiXRI pools.
 * If cfg_xri_rebalancing==0, it removes an IO buffer from the head of the
 * @hwqid io_buf_list and returns it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 * The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 * NULL - Error
 * Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;
	if (!qp) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
				"5555 NULL qp for hwqid x%x\n", hwqid);
		return lpfc_cmd;
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
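
/*
 * Illustrative sketch (compiled out; hypothetical types): the
 * non-rebalancing path above is the classic two-list allocator. Consumers
 * drain a "get" list while frees land on a separately locked "put" list;
 * only when the get list runs dry are the lists spliced, so the two sides
 * rarely contend. The refill in isolation, with the caller assumed to hold
 * the get-side lock:
 */
#if 0
struct item {
	struct list_head list;
};

static struct item *two_list_get(struct list_head *get_list,
				 struct list_head *put_list,
				 spinlock_t *put_lock)
{
	struct item *it;

	it = list_first_entry_or_null(get_list, struct item, list);
	if (!it) {
		spin_lock(put_lock);	/* rare slow path */
		list_splice_init(put_list, get_list);
		spin_unlock(put_lock);
		it = list_first_entry_or_null(get_list, struct item, list);
	}
	if (it)
		list_del(&it->list);
	return it;
}
#endif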

/**
 * lpfc_read_object - Retrieve object data from HBA
 * @phba: The HBA for which this call is being executed.
 * @rdobject: Pathname of object data we want to read.
 * @datap: Pointer to where data will be copied to.
 * @datasz: size of data area
 *
 * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
 * The data will be truncated if datasz is not large enough.
 * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
 * Returns the actual bytes read from the object.
 *
 * This routine is hard coded to use a poll completion. Unlike other
 * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
 * cleaned up in lpfc_sli4_cmd_mbox_free. If this routine is modified
 * to use interrupt-based completions, code is needed to fully cleanup
 * the memory.
 */
int
lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
		 uint32_t datasz)
{
	struct lpfc_mbx_read_object *read_object;
	LPFC_MBOXQ_t *mbox;
	int rc, length, eof, j, byte_cnt = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *pcmd;
	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_read_object) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_READ_OBJECT,
			 length, LPFC_SLI4_MBX_EMBED);
	read_object = &mbox->u.mqe.un.read_object;
	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
	read_object->u.request.rd_object_offset = 0;
	read_object->u.request.rd_object_cnt = 1;

	memset((void *)read_object->u.request.rd_object_name, 0,
	       LPFC_OBJ_NAME_SZ);
	/* Use an explicit "%s" so rdobject is never treated as a format */
	scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s",
		  rdobject);
	for (j = 0; j < strlen(rdobject); j++)
		read_object->u.request.rd_object_name[j] =
			cpu_to_le32(rd_object_name[j]);

	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt) {
		kfree(pcmd);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
	read_object->u.request.rd_object_hbuf[0].pa_lo =
		putPaddrLow(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].pa_hi =
		putPaddrHigh(pcmd->phys);
	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_ndlp = NULL;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (shdr_status == STATUS_FAILED &&
	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"4674 No port cfg file in FW.\n");
		byte_cnt = -ENOENT;
	} else if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
				"2625 READ_OBJECT mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		byte_cnt = -ENXIO;
	} else {
		/* Success */
		length = read_object->u.response.rd_object_actual_rlen;
		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
				length, datasz, eof);

		/* Detect the port config file exists but is empty */
		if (!length && eof) {
			byte_cnt = 0;
			goto exit;
		}

		byte_cnt = length;
		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
	}

 exit:
	/* This is an embedded SLI4 mailbox with an external buffer allocated.
	 * Free the pcmd and then cleanup with the correct routine.
	 */
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return byte_cnt;
}
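
/*
 * Illustrative usage sketch (compiled out): a caller hands lpfc_read_object
 * a kernel buffer of up to LPFC_BPL_SIZE bytes and checks the signed byte
 * count. The object name below is only an example string, not a name the
 * firmware is guaranteed to export.
 */
#if 0
	char objname[] = "/driver/example.cfg";	/* hypothetical object */
	u32 *data;
	int len;

	data = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
	if (data) {
		len = lpfc_read_object(phba, objname, data, LPFC_BPL_SIZE);
		if (len < 0)
			/* -ENOENT: no such object; -ENXIO: mailbox failed */
			pr_warn("READ_OBJECT failed: %d\n", len);
		kfree(data);
	}
#endif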

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 * NULL - Error
 * Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
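
/*
 * Illustrative skeleton (compiled out; hypothetical helpers): note the lock
 * choreography above. hdwq_lock is dropped across both GFP_ATOMIC
 * allocations and re-taken only to attach the new chunk, so the lock is
 * never held while the allocator runs. The general "pool first, allocate on
 * miss" shape:
 */
#if 0
	spin_lock_irqsave(&pool->lock, flags);
	entry = pool_take_first(pool);		/* fast path */
	if (!entry) {
		spin_unlock_irqrestore(&pool->lock, flags);
		entry = alloc_chunk(GFP_ATOMIC);	/* may fail, won't sleep */
		if (!entry)
			return NULL;
		spin_lock_irqsave(&pool->lock, flags);
		attach_to_owner(pool, entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
#endif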

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 * 0 - Success
 * -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return codes:
 * None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 * NULL - Error
 * Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
						GFP_ATOMIC,
						&tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
						  sizeof(struct fcp_cmnd32));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP pool.
 *
 * Return codes:
 * 0 - Success
 * -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 * None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		list_del(&list_entry->list_node);
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
 * @phba: phba object
 * @job: job entry of the command to be posted.
 *
 * Fill in the common fields of the WQE for the command.
 *
 * Return codes:
 * None
 **/
void
lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
{
	u8 cmnd;
	u32 *pcmd;
	u32 if_type = 0;
	u32 abort_tag;
	bool fip;
	struct lpfc_nodelist *ndlp = NULL;
	union lpfc_wqe128 *wqe = &job->wqe;
	u8 command_type = ELS_COMMAND_NON_FIP;

	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
	/* The fcp commands will set command type */
	if (job->cmd_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	abort_tag = job->iotag;
	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);

	switch (cmnd) {
	case CMD_ELS_REQUEST64_WQE:
		ndlp = job->ndlp;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			pcmd = (u32 *)job->cmd_dmabuf->virt;
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				     *pcmd == ELS_CMD_SCR ||
				     *pcmd == ELS_CMD_RDF ||
				     *pcmd == ELS_CMD_EDC ||
				     *pcmd == ELS_CMD_RSCN_XMT ||
				     *pcmd == ELS_CMD_FDISC ||
				     *pcmd == ELS_CMD_LOGO ||
				     *pcmd == ELS_CMD_QFPA ||
				     *pcmd == ELS_CMD_UVEM ||
				     *pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
				       job->vport->fc_myDID);

				if ((*pcmd == ELS_CMD_FLOGI) &&
				    !(phba->fc_topology ==
				      LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);

				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
			} else if (pcmd) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}

		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_ELS_RSP64_WQE:
		ndlp = job->ndlp;

		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;

		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				       job->vport->fc_myDID);
				if (job->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
					       &wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}

		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);

		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
			       job->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);

			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				       phba->vpi_ids[job->vport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_GEN_REQUEST64_WQE:
		/* Word 10 */
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_SEQUENCE64_WQE:
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);

		wqe->xmit_sequence.rsvd3 = 0;
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_WQE:
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		break;
	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
	case CMD_ABORT_XRI_WQE:		/* abort iotag */
	case CMD_SEND_FRAME:		/* mds loopback */
		/* cases already formatted for sli4 wqe - no changes necessary */
		return;
	default:
		dump_stack();
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6207 Invalid command 0x%x\n",
				cmnd);
		break;
	}

	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}