Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/errno.h>
7#include <linux/io.h>
8#include <linux/slab.h>
9#include <linux/etherdevice.h>
10#include "ionic.h"
11#include "ionic_dev.h"
12#include "ionic_lif.h"
13
/* Periodic device watchdog: rearms itself, checks the FW heartbeat,
 * refreshes link state, and pushes a deferred rx_mode sync if one is
 * pending.  Runs in timer (softirq) context, hence GFP_ATOMIC below.
 */
static void ionic_watchdog_cb(struct timer_list *t)
{
	struct ionic *ionic = timer_container_of(ionic, t, watchdog_timer);
	struct ionic_lif *lif = ionic->lif;
	struct ionic_deferred_work *work;
	int hb;

	/* rearm first so the watchdog keeps running even if we bail early */
	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	/* no LIF yet - nothing more to check */
	if (!lif)
		return;

	hb = ionic_heartbeat_check(ionic);
	dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n",
		__func__, hb, netif_running(lif->netdev),
		test_bit(IONIC_LIF_F_UP, lif->state));

	/* hb >= 0: heartbeat looked OK or the check was rate-skipped */
	if (hb >= 0 &&
	    !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);

	/* hand any pending rx filter/mode sync to the deferred worker */
	if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state) &&
	    !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "rxmode change dropped\n");
			return;
		}

		work->type = IONIC_DW_TYPE_RX_MODE;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(lif, work);
	}
}
49
/* Schedule NAPI from process context: napi_schedule() must be called
 * with bottom halves disabled so the softirq can be raised safely.
 */
static void ionic_napi_schedule_do_softirq(struct napi_struct *napi)
{
	local_bh_disable();
	napi_schedule(napi);
	local_bh_enable();
}
56
57void ionic_doorbell_napi_work(struct work_struct *work)
58{
59 struct ionic_qcq *qcq = container_of(work, struct ionic_qcq,
60 doorbell_napi_work);
61 unsigned long now, then, dif;
62
63 now = READ_ONCE(jiffies);
64 then = qcq->q.dbell_jiffies;
65 dif = now - then;
66
67 if (dif > qcq->q.dbell_deadline)
68 ionic_napi_schedule_do_softirq(&qcq->napi);
69}
70
71static int ionic_get_preferred_cpu(struct ionic *ionic,
72 struct ionic_intr_info *intr)
73{
74 int cpu;
75
76 cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
77 if (cpu >= nr_cpu_ids)
78 cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
79
80 return cpu;
81}
82
/* Queue this qcq's doorbell-check work on the CPU preferred by its
 * interrupt affinity; only interrupt-driven queues need the check.
 */
static void ionic_queue_dbell_napi_work(struct ionic *ionic,
					struct ionic_qcq *qcq)
{
	int cpu;

	if (!(qcq->flags & IONIC_QCQ_F_INTR))
		return;

	cpu = ionic_get_preferred_cpu(ionic, &qcq->intr);
	queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work);
}
94
/* Delayed-work half of the doorbell workaround: under queue_lock, kick
 * the adminq NAPI directly, schedule per-queue doorbell checks for all
 * active tx/rx queues, then rearm itself.  Does nothing while FW is
 * stopping or in reset.
 */
static void ionic_doorbell_check_dwork(struct work_struct *work)
{
	struct ionic *ionic = container_of(work, struct ionic,
					   doorbell_check_dwork.work);
	struct ionic_lif *lif = ionic->lif;

	mutex_lock(&lif->queue_lock);

	if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) ||
	    test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		mutex_unlock(&lif->queue_lock);
		return;
	}

	ionic_napi_schedule_do_softirq(&lif->adminqcq->napi);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		int i;

		for (i = 0; i < lif->nxqs; i++) {
			ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]);
			ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]);
		}

		/* hwstamp queues are poked directly, not via per-qcq work */
		if (lif->hwstamp_txq &&
		    lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR)
			ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi);
		if (lif->hwstamp_rxq &&
		    lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR)
			ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi);
	}
	mutex_unlock(&lif->queue_lock);

	/* rearm for the next deadline check */
	ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
}
130
131bool ionic_doorbell_wa(struct ionic *ionic)
132{
133 u8 asic_type = ionic->idev.dev_info.asic_type;
134
135 return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA;
136}
137
138static int ionic_watchdog_init(struct ionic *ionic)
139{
140 struct ionic_dev *idev = &ionic->idev;
141
142 timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
143 ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;
144
145 /* set times to ensure the first check will proceed */
146 atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
147 idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
148 /* init as ready, so no transition if the first check succeeds */
149 idev->last_fw_hb = 0;
150 idev->fw_hb_ready = true;
151 idev->fw_status_ready = true;
152 idev->fw_generation = IONIC_FW_STS_F_GENERATION &
153 ioread8(&idev->dev_info_regs->fw_status);
154
155 ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0,
156 dev_name(ionic->dev));
157 if (!ionic->wq) {
158 dev_err(ionic->dev, "alloc_workqueue failed");
159 return -ENOMEM;
160 }
161
162 if (ionic_doorbell_wa(ionic))
163 INIT_DELAYED_WORK(&ionic->doorbell_check_dwork,
164 ionic_doorbell_check_dwork);
165
166 return 0;
167}
168
/* (Re)schedule the doorbell-check delayed work 'delay' jiffies out, on
 * the adminq's preferred CPU; no-op unless the workaround is enabled.
 */
void ionic_queue_doorbell_check(struct ionic *ionic, int delay)
{
	int cpu;

	if (!ionic->lif->doorbell_wa)
		return;

	cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
	queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
			      delay);
}
180
/* Snapshot the device info registers (asic type/rev, FW version,
 * serial number) into idev->dev_info.
 * NOTE(review): the NUL stores below write at index BUFLEN, so the
 * dev_info buffers are assumed to be BUFLEN+1 bytes - confirm in
 * the struct definition.
 */
void ionic_init_devinfo(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type);
	idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev);

	memcpy_fromio(idev->dev_info.fw_version,
		      idev->dev_info_regs->fw_version,
		      IONIC_DEVINFO_FWVERS_BUFLEN);

	memcpy_fromio(idev->dev_info.serial_num,
		      idev->dev_info_regs->serial_num,
		      IONIC_DEVINFO_SERIAL_BUFLEN);

	/* force NUL termination of the raw register copies */
	idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0;
	idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0;

	dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}
201
/* Discover the CMB (controller memory bar) layout from firmware and
 * record the regions found: one regular mapping region plus optional
 * express-doorbell (expdb) regions at various WQE strides.  On any
 * validation failure, CMB is simply left disabled.
 */
static void ionic_map_disc_cmb(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	u32 length_reg0, length, offset, num_regions;
	struct ionic_dev_bar *bar = ionic->bars;
	struct ionic_dev *idev = &ionic->idev;
	struct device *dev = ionic->dev;
	int err, sz, i;
	u64 end;

	/* ask FW for the layout, copy it out of the devcmd data area */
	mutex_lock(&ionic->dev_cmd_lock);

	ionic_dev_cmd_discover_cmb(idev);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	if (!err) {
		sz = min(sizeof(ident->cmb_layout),
			 sizeof(idev->dev_cmd_regs->data));
		memcpy_fromio(&ident->cmb_layout,
			      &idev->dev_cmd_regs->data, sz);
	}
	mutex_unlock(&ionic->dev_cmd_lock);

	if (err) {
		dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
		return;
	}

	/* CMB lives in the third bar entry (BAR2) */
	bar += 2;

	num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
	if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
		dev_warn(dev, "Invalid number of CMB entries (%d)\n",
			 num_regions);
		return;
	}

	dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
		num_regions, ident->cmb_layout.flags);

	/* bounds-check every region; offset/length appear to be in 64K
	 * units, compared against bar->len scaled down to match - TODO
	 * confirm the units against the FW interface spec
	 */
	for (i = 0; i < num_regions; i++) {
		offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
		length = le32_to_cpu(ident->cmb_layout.region[i].length);
		end = offset + length;

		dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
			i, ident->cmb_layout.region[i].bar_num,
			ident->cmb_layout.region[i].cmb_type,
			offset, length);

		if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
			dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
				 i, offset, length);
			return;
		}
	}

	/* if first entry matches PCI config, expdb is not supported */
	if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
	    le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
	    !ident->cmb_layout.region[0].offset) {
		dev_warn(dev, "No CMB mapping discovered\n");
		return;
	}

	/* process first entry for regular mapping */
	length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
	if (!length_reg0) {
		dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
		return;
	}

	/* Verify first entry size matches expected 8MB size (in 64KB pages) */
	if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
		dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
			 length_reg0);
		return;
	}

	/* allocation bitmap: one bit per PAGE_SIZE page of region 0 */
	sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
			   PAGE_SIZE) * sizeof(long);
	idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
	if (!idev->cmb_inuse) {
		dev_warn(dev, "No memory for CMB, disabling\n");
		idev->phy_cmb_pages = 0;
		idev->phy_cmb_expdb64_pages = 0;
		idev->phy_cmb_expdb128_pages = 0;
		idev->phy_cmb_expdb256_pages = 0;
		idev->phy_cmb_expdb512_pages = 0;
		idev->cmb_npages = 0;
		return;
	}

	for (i = 0; i < num_regions; i++) {
		/* check this region matches first region length as to
		 * ease implementation
		 */
		if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
		    length_reg0)
			continue;

		offset = le32_to_cpu(ident->cmb_layout.region[i].offset);

		switch (ident->cmb_layout.region[i].cmb_type) {
		case IONIC_CMB_TYPE_DEVMEM:
			/* NOTE(review): 'offset' is added un-shifted here but
			 * is shifted by IONIC_CMB_SHIFT_64K in the expdb
			 * cases below - confirm the units are meant to
			 * differ.  Also 'length' in the dev_dbg below still
			 * holds the last value from the validation loop
			 * above, not this region's length.
			 */
			idev->phy_cmb_pages = bar->bus_addr + offset;
			idev->cmb_npages =
				(length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
			dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
				&bar->bus_addr, i, length);
			dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
				&idev->phy_cmb_pages, idev->cmb_npages);
			break;

		case IONIC_CMB_TYPE_EXPDB64:
			idev->phy_cmb_expdb64_pages =
				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
			dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
				&idev->phy_cmb_expdb64_pages);
			break;

		case IONIC_CMB_TYPE_EXPDB128:
			idev->phy_cmb_expdb128_pages =
				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
			dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
				&idev->phy_cmb_expdb128_pages);
			break;

		case IONIC_CMB_TYPE_EXPDB256:
			idev->phy_cmb_expdb256_pages =
				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
			dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
				&idev->phy_cmb_expdb256_pages);
			break;

		case IONIC_CMB_TYPE_EXPDB512:
			idev->phy_cmb_expdb512_pages =
				bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
			dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
				&idev->phy_cmb_expdb512_pages);
			break;

		default:
			dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
				 i, ident->cmb_layout.region[i].cmb_type);
			break;
		}
	}
}
350
351static void ionic_map_classic_cmb(struct ionic *ionic)
352{
353 struct ionic_dev_bar *bar = ionic->bars;
354 struct ionic_dev *idev = &ionic->idev;
355 struct device *dev = ionic->dev;
356 int sz;
357
358 bar += 2;
359 /* classic CMB mapping */
360 idev->phy_cmb_pages = bar->bus_addr;
361 idev->cmb_npages = bar->len / PAGE_SIZE;
362 dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
363 &bar->bus_addr, bar->len);
364 dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
365 &idev->phy_cmb_pages, idev->cmb_npages);
366
367 sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
368 idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
369 if (!idev->cmb_inuse) {
370 idev->phy_cmb_pages = 0;
371 idev->cmb_npages = 0;
372 }
373}
374
/* CMB entry point: skip if PCI resource 4 (presumably BAR2 given
 * 64-bit BAR pairing - TODO confirm) is not a memory resource, then
 * use the FW-discovered layout when the device advertises it, else
 * the classic whole-bar mapping.
 */
void ionic_map_cmb(struct ionic *ionic)
{
	struct pci_dev *pdev = ionic->pdev;
	struct device *dev = ionic->dev;

	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
		dev_dbg(dev, "No CMB, disabling\n");
		return;
	}

	if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
		ionic_map_disc_cmb(ionic);
	else
		ionic_map_classic_cmb(ionic);
}
390
/* Wire up the register pointers from the mapped BARs and initialize
 * the watchdog: BAR0 holds dev_info/dev_cmd regs and interrupt
 * control, BAR1 the doorbell pages, BAR2 (optional) the CMB.
 * Returns 0 on success, -EFAULT for missing/short bars or a bad FW
 * signature, or the error from ionic_watchdog_init().
 */
int ionic_dev_setup(struct ionic *ionic)
{
	struct ionic_dev_bar *bar = ionic->bars;
	unsigned int num_bars = ionic->num_bars;
	struct ionic_dev *idev = &ionic->idev;
	struct device *dev = ionic->dev;
	u32 sig;
	int err;

	/* BAR0: dev_cmd and interrupts */
	if (num_bars < 1) {
		dev_err(dev, "No bars found, aborting\n");
		return -EFAULT;
	}

	if (bar->len < IONIC_BAR0_SIZE) {
		dev_err(dev, "Resource bar size %lu too small, aborting\n",
			bar->len);
		return -EFAULT;
	}

	idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
	idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
	idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
	idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;

	idev->hwstamp_regs = &idev->dev_info_regs->hwstamp;

	/* sanity-check that we're talking to compatible firmware */
	sig = ioread32(&idev->dev_info_regs->signature);
	if (sig != IONIC_DEV_INFO_SIGNATURE) {
		dev_err(dev, "Incompatible firmware signature %x", sig);
		return -EFAULT;
	}

	ionic_init_devinfo(ionic);

	/* BAR1: doorbells */
	bar++;
	if (num_bars < 2) {
		dev_err(dev, "Doorbell bar missing, aborting\n");
		return -EFAULT;
	}

	err = ionic_watchdog_init(ionic);
	if (err)
		return err;

	idev->db_pages = bar->vaddr;
	idev->phy_db_pages = bar->bus_addr;

	/* BAR2: optional controller memory mapping */
	bar++;
	mutex_init(&idev->cmb_inuse_lock);
	/* a missing/empty CMB bar is not an error - just no CMB */
	if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
		idev->cmb_inuse = NULL;
		idev->phy_cmb_pages = 0;
		idev->cmb_npages = 0;
		return 0;
	}

	return 0;
}
453
/* Undo ionic_dev_setup()/ionic_watchdog_init(): release the CMB
 * bitmap, clear the CMB bookkeeping, and destroy the workqueue.
 */
void ionic_dev_teardown(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	kfree(idev->cmb_inuse);
	idev->cmb_inuse = NULL;
	idev->phy_cmb_pages = 0;
	idev->cmb_npages = 0;

	idev->phy_cmb_expdb64_pages = 0;
	idev->phy_cmb_expdb128_pages = 0;
	idev->phy_cmb_expdb256_pages = 0;
	idev->phy_cmb_expdb512_pages = 0;

	if (ionic->wq) {
		destroy_workqueue(ionic->wq);
		ionic->wq = NULL;
	}
	mutex_destroy(&idev->cmb_inuse_lock);
}
474
475/* Devcmd Interface */
476static bool __ionic_is_fw_running(struct ionic_dev *idev, u8 *status_ptr)
477{
478 u8 fw_status;
479
480 if (!idev->dev_info_regs) {
481 if (status_ptr)
482 *status_ptr = 0xff;
483 return false;
484 }
485
486 fw_status = ioread8(&idev->dev_info_regs->fw_status);
487 if (status_ptr)
488 *status_ptr = fw_status;
489
490 /* firmware is useful only if the running bit is set and
491 * fw_status != 0xff (bad PCI read)
492 */
493 return (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING);
494}
495
/* Return true if FW reports itself up and running; status byte not needed. */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
	return __ionic_is_fw_running(idev, NULL);
}
500
/* Check firmware liveness: first the fw_status RUNNING bit and the
 * generation bits, then the fw_heartbeat counter.  The whole check is
 * rate-limited to once a second (racing callers are serialized via
 * cmpxchg on last_check_time), and the heartbeat read to twice the
 * DEVCMD timeout.  On an up/down transition a deferred LIF reset is
 * queued.  Returns 0 when FW looks alive or the check was skipped,
 * -ENXIO when FW is down or the heartbeat has stalled.
 */
int ionic_heartbeat_check(struct ionic *ionic)
{
	unsigned long check_time, last_check_time;
	struct ionic_dev *idev = &ionic->idev;
	struct ionic_lif *lif = ionic->lif;
	bool fw_status_ready = true;
	bool fw_hb_ready;
	u8 fw_generation;
	u8 fw_status;
	u32 fw_hb;

	/* wait a least one second before testing again */
	check_time = jiffies;
	last_check_time = atomic_long_read(&idev->last_check_time);
do_check_time:
	if (time_before(check_time, last_check_time + HZ))
		return 0;
	if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time,
					     &last_check_time, check_time)) {
		/* if called concurrently, only the first should proceed. */
		dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__);
		goto do_check_time;
	}

	/* If fw_status is not ready don't bother with the generation */
	if (!__ionic_is_fw_running(idev, &fw_status)) {
		fw_status_ready = false;
	} else {
		/* a generation change means FW silently restarted */
		fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
		if (idev->fw_generation != fw_generation) {
			dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n",
				 idev->fw_generation, fw_generation);

			idev->fw_generation = fw_generation;

			/* If the generation changed, the fw status is not
			 * ready so we need to trigger a fw-down cycle. After
			 * the down, the next watchdog will see the fw is up
			 * and the generation value stable, so will trigger
			 * the fw-up activity.
			 *
			 * If we had already moved to FW_RESET from a RESET event,
			 * it is possible that we never saw the fw_status go to 0,
			 * so we fake the current idev->fw_status_ready here to
			 * force the transition and get FW up again.
			 */
			if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
				idev->fw_status_ready = false;	/* go to running */
			else
				fw_status_ready = false;	/* go to down */
		}
	}

	dev_dbg(ionic->dev, "fw_status 0x%02x ready %d idev->ready %d last_hb 0x%x state 0x%02lx\n",
		fw_status, fw_status_ready, idev->fw_status_ready,
		idev->last_fw_hb, lif->state[0]);

	/* is this a transition? */
	if (fw_status_ready != idev->fw_status_ready &&
	    !test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
		bool trigger = false;

		idev->fw_status_ready = fw_status_ready;

		if (!fw_status_ready &&
		    !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
		    !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
			dev_info(ionic->dev, "FW stopped 0x%02x\n", fw_status);
			trigger = true;

		} else if (fw_status_ready &&
			   test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
			dev_info(ionic->dev, "FW running 0x%02x\n", fw_status);
			trigger = true;
		}

		/* hand the fw-down/fw-up work to the deferred worker;
		 * allocation failure just means the next check retries
		 */
		if (trigger) {
			struct ionic_deferred_work *work;

			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (work) {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				work->fw_status = fw_status_ready;
				ionic_lif_deferred_enqueue(lif, work);
			}
		}
	}

	if (!idev->fw_status_ready)
		return -ENXIO;

	/* Because of some variability in the actual FW heartbeat, we
	 * wait longer than the DEVCMD_TIMEOUT before checking again.
	 */
	last_check_time = idev->last_hb_time;
	if (time_before(check_time, last_check_time + DEVCMD_TIMEOUT * 2 * HZ))
		return 0;

	fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
	fw_hb_ready = fw_hb != idev->last_fw_hb;

	/* early FW version had no heartbeat, so fake it */
	if (!fw_hb_ready && !fw_hb)
		fw_hb_ready = true;

	dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n",
		__func__, fw_hb, idev->last_fw_hb, fw_hb_ready);

	idev->last_fw_hb = fw_hb;

	/* log a transition */
	if (fw_hb_ready != idev->fw_hb_ready) {
		idev->fw_hb_ready = fw_hb_ready;
		if (!fw_hb_ready)
			dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb);
		else
			dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb);
	}

	if (!fw_hb_ready)
		return -ENXIO;

	idev->last_hb_time = check_time;

	return 0;
}
627
/* Read the completion status of the last devcmd; PCI_ERROR_RESPONSE
 * when the cmd regs are unmapped.
 */
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
	if (!idev->dev_cmd_regs)
		return (u8)PCI_ERROR_RESPONSE;
	return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
634
/* Return true once FW has flagged the posted devcmd as done. */
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
	if (!idev->dev_cmd_regs)
		return false;
	return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
641
/* Copy the devcmd completion out of the device registers; no-op when
 * the cmd regs are unmapped (caller's comp is left untouched).
 */
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
	if (!idev->dev_cmd_regs)
		return;
	memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
648
/* Post a devcmd: record the opcode, copy the command into the cmd
 * regs, clear 'done', then ring the devcmd doorbell.  Callers in this
 * file hold dev_cmd_lock around go/wait pairs.
 */
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
	/* remember the opcode even if the regs are gone, for error logs */
	idev->opcode = cmd->cmd.opcode;

	if (!idev->dev_cmd_regs)
		return;

	memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
	iowrite32(0, &idev->dev_cmd_regs->done);
	iowrite32(1, &idev->dev_cmd_regs->doorbell);
}
660
661/* Device commands */
/* Devcmd: identify the device at interface version 'ver'. */
void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.identify.opcode = IONIC_CMD_IDENTIFY,
		.identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
671
/* Devcmd: initialize the device (type 0). */
void ionic_dev_cmd_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.init.opcode = IONIC_CMD_INIT,
		.init.type = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
681
/* Devcmd: reset the device. */
void ionic_dev_cmd_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.reset.opcode = IONIC_CMD_RESET,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
690
691/* Port commands */
692void ionic_dev_cmd_port_identify(struct ionic_dev *idev)
693{
694 union ionic_dev_cmd cmd = {
695 .port_init.opcode = IONIC_CMD_PORT_IDENTIFY,
696 .port_init.index = 0,
697 };
698
699 ionic_dev_cmd_go(idev, &cmd);
700}
701
/* Devcmd: init port 0, handing FW the port info DMA address. */
void ionic_dev_cmd_port_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_init.opcode = IONIC_CMD_PORT_INIT,
		.port_init.index = 0,
		.port_init.info_pa = cpu_to_le64(idev->port_info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}
712
/* Devcmd: reset port 0. */
void ionic_dev_cmd_port_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_reset.opcode = IONIC_CMD_PORT_RESET,
		.port_reset.index = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
722
/* Devcmd: set port 0 admin state (IONIC_PORT_ADMIN_STATE_*). */
void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_STATE,
		.port_setattr.state = state,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
734
/* Devcmd: set port 0 link speed. */
void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_SPEED,
		.port_setattr.speed = cpu_to_le32(speed),
	};

	ionic_dev_cmd_go(idev, &cmd);
}
746
/* Devcmd: enable/disable port 0 autonegotiation. */
void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_AUTONEG,
		.port_setattr.an_enable = an_enable,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
758
/* Devcmd: set port 0 forward-error-correction mode. */
void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_FEC,
		.port_setattr.fec_type = fec_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
770
/* Devcmd: set port 0 pause (flow control) mode. */
void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_PAUSE,
		.port_setattr.pause_type = pause_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
782
783/* VF commands */
784int ionic_set_vf_config(struct ionic *ionic, int vf,
785 struct ionic_vf_setattr_cmd *vfc)
786{
787 union ionic_dev_cmd cmd = {
788 .vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
789 .vf_setattr.attr = vfc->attr,
790 .vf_setattr.vf_index = cpu_to_le16(vf),
791 };
792 int err;
793
794 memcpy(cmd.vf_setattr.pad, vfc->pad, sizeof(vfc->pad));
795
796 mutex_lock(&ionic->dev_cmd_lock);
797 ionic_dev_cmd_go(&ionic->idev, &cmd);
798 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
799 mutex_unlock(&ionic->dev_cmd_lock);
800
801 return err;
802}
803
/* Tell FW to start all VFs; no-op if the device doesn't advertise
 * VF_CTRL capability.
 * NOTE(review): unlike ionic_set_vf_config(), this go/wait pair is not
 * under dev_cmd_lock - confirm the caller serializes devcmds here.
 */
void ionic_vf_start(struct ionic *ionic)
{
	union ionic_dev_cmd cmd = {
		.vf_ctrl.opcode = IONIC_CMD_VF_CTRL,
		.vf_ctrl.ctrl_opcode = IONIC_VF_CTRL_START_ALL,
	};

	if (!(ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_VF_CTRL)))
		return;

	ionic_dev_cmd_go(&ionic->idev, &cmd);
	ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
}
817
818/* LIF commands */
819void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
820 u16 lif_type, u8 qtype, u8 qver)
821{
822 union ionic_dev_cmd cmd = {
823 .q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
824 .q_identify.lif_type = cpu_to_le16(lif_type),
825 .q_identify.type = qtype,
826 .q_identify.ver = qver,
827 };
828
829 ionic_dev_cmd_go(idev, &cmd);
830}
831
/* Devcmd: identify a LIF of the given type at interface version 'ver'. */
void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY,
		.lif_identify.type = type,
		.lif_identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
842
/* Devcmd: init a LIF, handing FW the LIF info DMA address. */
void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
			    dma_addr_t info_pa)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_INIT,
		.lif_init.index = cpu_to_le16(lif_index),
		.lif_init.info_pa = cpu_to_le64(info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}
854
/* Devcmd: reset the given LIF. */
void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_RESET,
		.lif_init.index = cpu_to_le16(lif_index),
	};

	ionic_dev_cmd_go(idev, &cmd);
}
864
/* Devcmd: init the admin queue - a Q_INIT with IRQ and ENA set,
 * describing the queue/cq rings and the interrupt to bind.
 */
void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
			       u16 lif_index, u16 intr_index)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;

	union ionic_dev_cmd cmd = {
		.q_init.opcode = IONIC_CMD_Q_INIT,
		.q_init.lif_index = cpu_to_le16(lif_index),
		.q_init.type = q->type,
		.q_init.ver = qcq->q.lif->qtype_info[q->type].version,
		.q_init.index = cpu_to_le32(q->index),
		.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					    IONIC_QINIT_F_ENA),
		.q_init.pid = cpu_to_le16(q->pid),
		.q_init.intr_index = cpu_to_le16(intr_index),
		.q_init.ring_size = ilog2(q->num_descs),
		.q_init.ring_base = cpu_to_le64(q->base_pa),
		.q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}
888
/* Devcmd: ask FW for the CMB region layout (see ionic_map_disc_cmb). */
void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
	};

	ionic_dev_cmd_go(idev, &cmd);
}
897
/* Doorbell pages are grouped per LIF, dbid_count pages each; return
 * the global doorbell page number for this LIF's pid.
 */
int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
	return (lif->hw_index * lif->dbid_count) + pid;
}
902
/* Allocate a 2^order page run from the CMB bitmap, returning its page
 * id in *pgid and its bus address in *pgaddr.  If an expdb region
 * matching stride_log2 is mapped, *pgaddr is the expdb alias and
 * *expdb is set true; otherwise the regular mapping is used.  The
 * regular-mapping alias of the run is zeroed one PAGE_SIZE ioremap at
 * a time.  Returns 0, a negative bitmap error, or -ENOMEM if an
 * ioremap fails (the run is released again in that case).
 */
int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
		  int order, u8 stride_log2, bool *expdb)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	void __iomem *nonexpdb_pgptr;
	phys_addr_t nonexpdb_pgaddr;
	int i, idx;

	mutex_lock(&idev->cmb_inuse_lock);
	idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
	mutex_unlock(&idev->cmb_inuse_lock);

	if (idx < 0)
		return idx;

	*pgid = (u32)idx;

	/* prefer the expdb alias whose WQE stride matches the request */
	if (idev->phy_cmb_expdb64_pages &&
	    stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
		*pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
		if (expdb)
			*expdb = true;
	} else if (idev->phy_cmb_expdb128_pages &&
		   stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
		*pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
		if (expdb)
			*expdb = true;
	} else if (idev->phy_cmb_expdb256_pages &&
		   stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
		*pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
		if (expdb)
			*expdb = true;
	} else if (idev->phy_cmb_expdb512_pages &&
		   stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
		*pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
		if (expdb)
			*expdb = true;
	} else {
		*pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
		if (expdb)
			*expdb = false;
	}

	/* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
	nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
	for (i = 0; i < (1 << order); i++) {
		nonexpdb_pgptr =
			ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
		if (!nonexpdb_pgptr) {
			ionic_put_cmb(lif, *pgid, order);
			return -ENOMEM;
		}
		memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
		iounmap(nonexpdb_pgptr);
	}

	return 0;
}
EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
962
/* Return a 2^order page run previously handed out by ionic_get_cmb(). */
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&idev->cmb_inuse_lock);
	bitmap_release_region(idev->cmb_inuse, pgid, order);
	mutex_unlock(&idev->cmb_inuse_lock);
}
EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
972
973int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
974 struct ionic_intr_info *intr,
975 unsigned int num_descs, size_t desc_size)
976{
977 unsigned int ring_size;
978
979 if (desc_size == 0 || !is_power_of_2(num_descs))
980 return -EINVAL;
981
982 ring_size = ilog2(num_descs);
983 if (ring_size < 2 || ring_size > 16)
984 return -EINVAL;
985
986 cq->lif = lif;
987 cq->bound_intr = intr;
988 cq->num_descs = num_descs;
989 cq->desc_size = desc_size;
990 cq->tail_idx = 0;
991 cq->done_color = 1;
992 cq->idev = &lif->ionic->idev;
993
994 return 0;
995}
996
997unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
998 ionic_cq_cb cb, ionic_cq_done_cb done_cb,
999 void *done_arg)
1000{
1001 unsigned int work_done = 0;
1002
1003 if (work_to_do == 0)
1004 return 0;
1005
1006 while (cb(cq)) {
1007 if (cq->tail_idx == cq->num_descs - 1)
1008 cq->done_color = !cq->done_color;
1009
1010 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
1011
1012 if (++work_done >= work_to_do)
1013 break;
1014 }
1015
1016 if (work_done && done_cb)
1017 done_cb(done_arg);
1018
1019 return work_done;
1020}
1021
/* Initialize the software state of a queue and build its debug name
 * ("L<lif>-<name><index>").  Same num_descs/desc_size validation as
 * ionic_cq_init(); returns 0 or -EINVAL.  Ring memory is set up
 * separately by the caller.
 */
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
		 struct ionic_queue *q, unsigned int index, const char *name,
		 unsigned int num_descs, size_t desc_size,
		 size_t sg_desc_size, unsigned int pid)
{
	unsigned int ring_size;

	if (desc_size == 0 || !is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = ilog2(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->lif = lif;
	q->index = index;
	q->num_descs = num_descs;
	q->desc_size = desc_size;
	q->sg_desc_size = sg_desc_size;
	q->tail_idx = 0;
	q->head_idx = 0;
	q->pid = pid;

	snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);

	return 0;
}
1049
/* Advance the producer index after the caller has filled a descriptor,
 * optionally ringing the doorbell (and timestamping the ring for the
 * missed-doorbell watchdog).
 */
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
{
	struct ionic_lif *lif = q->lif;
	struct device *dev = q->dev;

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	/* NOTE(review): q->hw_type feeds "qid=" and q->hw_index feeds
	 * "qtype=" - the two look swapped; confirm before touching the
	 * long-standing debug format.
	 */
	dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
		q->lif->index, q->name, q->hw_type, q->hw_index,
		q->head_idx, ring_doorbell);

	if (ring_doorbell) {
		ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = jiffies;
	}
}
1068
1069bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
1070{
1071 unsigned int mask, tail, head;
1072
1073 mask = q->num_descs - 1;
1074 tail = q->tail_idx;
1075 head = q->head_idx;
1076
1077 return ((pos - tail) & mask) < ((head - tail) & mask);
1078}