// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = 1;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

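/*
 * Each open file handle gets its own switchtec_user. It holds a
 * reference on the underlying device and is itself reference counted
 * so that it survives until any MRPC command it queued has completed.
 */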
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * Reading the odb (outbound doorbell) register flushes the
	 * write-combining buffer; it is handled by low-latency hardware
	 * and the read has no side effects.
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}

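/*
 * If the hardware is free, start the command at the head of the MRPC
 * queue: copy the input payload into the device's input buffer, write
 * the command register, then arm a 500ms delayed work item that polls
 * for completion in case the completion interrupt never arrives.
 */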
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

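/*
 * Complete the command at the head of the MRPC queue if the hardware
 * has finished with it: latch the status and return code, copy the
 * output data back into the user's buffer, wake any waiters, and
 * submit the next queued command.
 */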
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

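/*
 * Fallback path for a lost completion interrupt: if the command is
 * still in progress after 500ms, re-arm the timer; otherwise complete
 * the command from here.
 */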
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

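/*
 * Copy a fixed-width, space-padded string register out of MMIO space,
 * trim the trailing padding and terminate the result with a newline
 * for presentation in sysfs.
 */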
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

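/*
 * Write ABI: userspace writes a 32-bit MRPC command number followed by
 * up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input data. The command
 * is queued here and runs asynchronously; the result is collected with
 * a subsequent read().
 */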
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

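/*
 * Read ABI: userspace reads back a 32-bit MRPC return code followed by
 * the output data. The read blocks until the queued command completes,
 * unless the file was opened with O_NONBLOCK.
 */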
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

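/*
 * Poll for two independent conditions: EPOLLIN when this handle's MRPC
 * command has completed, and EPOLLPRI when new switch events have
 * occurred since this handle last read an event summary.
 */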
static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

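/*
 * Snapshot the global, per-partition and per-PFF event summary
 * registers for userspace, and bring this handle's event count up to
 * date so that poll() stops reporting EPOLLPRI until the next event.
 */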
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

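/*
 * Table mapping each ioctl event ID to the offset of its event header
 * register and to the helper that locates the right register bank
 * (global, per-partition or per-PFF) for a given index.
 */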
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

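/*
 * Translate an (event_id, index) pair from userspace into the MMIO
 * address of the corresponding event header register, validating the
 * index against the partition or PFF count where applicable.
 */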
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}

static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

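/*
 * When the index is SWITCHTEC_IOCTL_EVENT_IDX_ALL, apply the requested
 * flags to every instance of the event (each partition or each PFF);
 * otherwise operate on the single instance given.
 */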
static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

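/*
 * Compare the hardware's per-PFF link event counters against the
 * counts we last saw; if any changed, schedule the work item that
 * calls a registered link notifier (e.g. the NTB driver).
 */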
static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}

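/*
 * Tear down a dying device: mark it dead under the mrpc_mutex so no
 * new commands are accepted, then complete and drop every user still
 * waiting on the MRPC queue and wake any event waiters.
 */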
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

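/*
 * Mask a single event instance: if the event has occurred with its
 * interrupt enabled, clear the occurred and IRQ enable bits so it
 * stops asserting the interrupt, and return 1. Link state events are
 * skipped here because check_link_state_events() handles them.
 */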
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	ret = IRQ_HANDLED;
	return ret;
}

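/*
 * Allocate up to four MSI/MSI-X vectors and attach the event handler
 * to the vector the hardware reports in vep_vector_number. When DMA
 * MRPC is in use, a second handler is attached to the vector named in
 * the MRPC dma_vector register.
 */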
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}

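/*
 * Count the implemented PFF CSR banks (the list ends at the first bank
 * whose vendor ID is not Microsemi's) and record which instances
 * belong to the local partition.
 */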
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}

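/*
 * Map BAR 0: the MRPC region is mapped write-combining so the input
 * buffer can be filled with posted writes (flushed via flush_wc_buf()),
 * while the remaining GAS registers are mapped uncached.
 */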
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}

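/*
 * Switches that advertise the bridge class code may also be driven by
 * ntb_hw_switchtec, so that module is loaded opportunistically before
 * the management interface is set up and registered.
 */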
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);