Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Intel MIC Platform Software Stack (MPSS)
4 *
5 * Copyright(c) 2014 Intel Corporation.
6 *
7 * Intel SCIF driver.
8 */
9#include <linux/module.h>
10#include <linux/idr.h>
11
12#include <linux/mic_common.h>
13#include "../common/mic_dev.h"
14#include "../bus/scif_bus.h"
15#include "scif_peer_bus.h"
16#include "scif_main.h"
17#include "scif_map.h"
18
/*
 * Global SCIF driver state. The embedded miscdevice registers the
 * "scif" character device (dynamic minor) backed by scif_fops.
 */
struct scif_info scif_info = {
	.mdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "scif",
		.fops = &scif_fops,
	}
};
26
/* Per-node device array, indexed by node id; allocated in scif_setup_scifdev() */
struct scif_dev *scif_dev;
/* Slab cache for bounce buffers used for unaligned DMA transfers */
struct kmem_cache *unaligned_cache;
/* Number of probed SCIF devices; the first probe sets up the loopback QP */
static atomic_t g_loopb_cnt;
30
31/* Runs in the context of intr_wq */
32static void scif_intr_bh_handler(struct work_struct *work)
33{
34 struct scif_dev *scifdev =
35 container_of(work, struct scif_dev, intr_bh);
36
37 if (scifdev_self(scifdev))
38 scif_loopb_msg_handler(scifdev, scifdev->qpairs);
39 else
40 scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
41}
42
43int scif_setup_intr_wq(struct scif_dev *scifdev)
44{
45 if (!scifdev->intr_wq) {
46 snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
47 "SCIF INTR %d", scifdev->node);
48 scifdev->intr_wq =
49 alloc_ordered_workqueue(scifdev->intr_wqname, 0);
50 if (!scifdev->intr_wq)
51 return -ENOMEM;
52 INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
53 }
54 return 0;
55}
56
57void scif_destroy_intr_wq(struct scif_dev *scifdev)
58{
59 if (scifdev->intr_wq) {
60 destroy_workqueue(scifdev->intr_wq);
61 scifdev->intr_wq = NULL;
62 }
63}
64
/*
 * Hard IRQ handler for the SCIF doorbell interrupt. Acknowledges the
 * doorbell at the hardware level and defers all message processing to
 * the intr_wq bottom half (scif_intr_bh_handler).
 */
irqreturn_t scif_intr_handler(int irq, void *data)
{
	struct scif_dev *scifdev = data;
	struct scif_hw_dev *sdev = scifdev->sdev;

	/* Ack before queueing so a re-raised doorbell isn't lost */
	sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
	queue_work(scifdev->intr_wq, &scifdev->intr_bh);
	return IRQ_HANDLED;
}
74
/*
 * Delayed work that completes queue pair setup with the peer node.
 * Reads the peer's QP DMA address and doorbell from the boot parameter
 * page; once the peer has published a non-zero address, sends the QP
 * response, otherwise reschedules itself to poll again in 1 second.
 */
static void scif_qp_setup_handler(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						qp_dwork.work);
	struct scif_hw_dev *sdev = scifdev->sdev;
	dma_addr_t da = 0;
	int err;

	if (scif_is_mgmt_node()) {
		/* Boot params are in local memory; plain loads suffice */
		struct mic_bootparam *bp = sdev->dp;

		da = bp->scif_card_dma_addr;
		scifdev->rdb = bp->h2c_scif_db;
	} else {
		/* Remote (host-side) boot params; must use MMIO accessors */
		struct mic_bootparam __iomem *bp = sdev->rdp;

		da = readq(&bp->scif_host_dma_addr);
		scifdev->rdb = ioread8(&bp->c2h_scif_db);
	}
	if (da) {
		err = scif_qp_response(da, scifdev);
		if (err)
			dev_err(&scifdev->sdev->dev,
				"scif_qp_response err %d\n", err);
	} else {
		/* Peer hasn't published its DMA address yet; retry later */
		schedule_delayed_work(&scifdev->qp_dwork,
				      msecs_to_jiffies(1000));
	}
}
104
105static int scif_setup_scifdev(void)
106{
107 /* We support a maximum of 129 SCIF nodes including the mgmt node */
108#define MAX_SCIF_NODES 129
109 int i;
110 u8 num_nodes = MAX_SCIF_NODES;
111
112 scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
113 if (!scif_dev)
114 return -ENOMEM;
115 for (i = 0; i < num_nodes; i++) {
116 struct scif_dev *scifdev = &scif_dev[i];
117
118 scifdev->node = i;
119 scifdev->exit = OP_IDLE;
120 init_waitqueue_head(&scifdev->disconn_wq);
121 mutex_init(&scifdev->lock);
122 INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
123 INIT_DELAYED_WORK(&scifdev->p2p_dwork,
124 scif_poll_qp_state);
125 INIT_DELAYED_WORK(&scifdev->qp_dwork,
126 scif_qp_setup_handler);
127 INIT_LIST_HEAD(&scifdev->p2p);
128 RCU_INIT_POINTER(scifdev->spdev, NULL);
129 }
130 return 0;
131}
132
133static void scif_destroy_scifdev(void)
134{
135 kfree(scif_dev);
136}
137
/*
 * Probe callback for the SCIF virtual bus.
 *
 * The first device probed (g_loopb_cnt transitions 0 -> 1) also sets up
 * the loopback queue pair for the local node. After that: create the
 * interrupt workqueue, set up the queue pair with the peer, request the
 * doorbell IRQ, publish our doorbell and QP DMA address in the boot
 * parameter page, and schedule delayed work to poll for the peer's
 * response. Error paths unwind in reverse order via gotos.
 */
static int scif_probe(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];
	int rc;

	dev_set_drvdata(&sdev->dev, sdev);
	scifdev->sdev = sdev;

	/* First probe: bring up the loopback QP for the local node */
	if (1 == atomic_add_return(1, &g_loopb_cnt)) {
		struct scif_dev *loopb_dev = &scif_dev[sdev->snode];

		loopb_dev->sdev = sdev;
		rc = scif_setup_loopback_qp(loopb_dev);
		if (rc)
			goto exit;
	}

	rc = scif_setup_intr_wq(scifdev);
	if (rc)
		goto destroy_loopb;
	rc = scif_setup_qp(scifdev);
	if (rc)
		goto destroy_intr;
	scifdev->db = sdev->hw_ops->next_db(sdev);
	scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						    "SCIF_INTR", scifdev,
						    scifdev->db);
	if (IS_ERR(scifdev->cookie)) {
		rc = PTR_ERR(scifdev->cookie);
		goto free_qp;
	}
	/* Publish our doorbell and QP DMA address for the peer to read */
	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = scifdev->db;
		bp->scif_host_dma_addr = scifdev->qp_dma_addr;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(scifdev->db, &bp->h2c_scif_db);
		writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
	}
	/* Poll for the peer's QP DMA address (scif_qp_setup_handler) */
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
	return rc;
free_qp:
	scif_free_qp(scifdev);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
destroy_loopb:
	/* Drop our reference; last one out destroys the loopback QP */
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
exit:
	return rc;
}
193
194void scif_stop(struct scif_dev *scifdev)
195{
196 struct scif_dev *dev;
197 int i;
198
199 for (i = scif_info.maxid; i >= 0; i--) {
200 dev = &scif_dev[i];
201 if (scifdev_self(dev))
202 continue;
203 scif_handle_remove_node(i);
204 }
205}
206
/*
 * Remove callback for the SCIF virtual bus. Invalidates the doorbell and
 * QP DMA address published at probe time, disconnects/stops the node,
 * drops the loopback QP reference, and releases the IRQ, interrupt
 * workqueue and queue pair resources in that order.
 */
static void scif_remove(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	/* Undo the boot-parameter publication done in scif_probe() */
	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = -1;
		bp->scif_host_dma_addr = 0x0;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(-1, &bp->h2c_scif_db);
		writeq(0x0, &bp->scif_card_dma_addr);
	}
	if (scif_is_mgmt_node()) {
		scif_disconnect_node(scifdev->node, true);
	} else {
		/* Card side: mark the exit as card-initiated and stop all nodes */
		scif_info.card_initiated_exit = true;
		scif_stop(scifdev);
	}
	/* Last device removed destroys the shared loopback QP */
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	if (scifdev->cookie) {
		sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
		scifdev->cookie = NULL;
	}
	scif_destroy_intr_wq(scifdev);
	cancel_delayed_work(&scifdev->qp_dwork);
	scif_free_qp(scifdev);
	scifdev->rdb = -1;
	scifdev->sdev = NULL;
}
240
/* Devices on the SCIF virtual bus this driver binds to (zero-terminated) */
static struct scif_hw_dev_id id_table[] = {
	{ MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
	{ 0 },
};
245
/* SCIF bus driver; probe/remove are scif_probe()/scif_remove() above */
static struct scif_driver scif_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = scif_probe,
	.remove = scif_remove,
};
253
/*
 * One-time initialization of global SCIF driver state: locks, endpoint
 * lists, the per-node device array, the unaligned-DMA slab cache, global
 * work items and the port id allocator.
 *
 * Return: 0 on success, negative errno on failure (allocations are
 * unwound via the goto labels).
 */
static int _scif_init(void)
{
	int rc;

	mutex_init(&scif_info.eplock);
	spin_lock_init(&scif_info.rmalock);
	spin_lock_init(&scif_info.nb_connect_lock);
	spin_lock_init(&scif_info.port_lock);
	mutex_init(&scif_info.conflock);
	mutex_init(&scif_info.connlock);
	mutex_init(&scif_info.fencelock);
	INIT_LIST_HEAD(&scif_info.uaccept);
	INIT_LIST_HEAD(&scif_info.listen);
	INIT_LIST_HEAD(&scif_info.zombie);
	INIT_LIST_HEAD(&scif_info.connected);
	INIT_LIST_HEAD(&scif_info.disconnected);
	INIT_LIST_HEAD(&scif_info.rma);
	INIT_LIST_HEAD(&scif_info.rma_tc);
	INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
	INIT_LIST_HEAD(&scif_info.fence);
	INIT_LIST_HEAD(&scif_info.nb_connect_list);
	init_waitqueue_head(&scif_info.exitwq);
	scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
	scif_info.en_msg_log = 0;
	scif_info.p2p_enable = 1;
	rc = scif_setup_scifdev();
	if (rc)
		goto error;
	/* Cache for bounce buffers used by unaligned DMA transfers */
	unaligned_cache = kmem_cache_create("Unaligned_DMA",
					    SCIF_KMEM_UNALIGNED_BUF_SIZE,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!unaligned_cache) {
		rc = -ENOMEM;
		goto free_sdev;
	}
	INIT_WORK(&scif_info.misc_work, scif_misc_handler);
	INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
	INIT_WORK(&scif_info.conn_work, scif_conn_handler);
	idr_init(&scif_ports);
	return 0;
free_sdev:
	scif_destroy_scifdev();
error:
	return rc;
}
299
/* Undo _scif_init(): release the port idr, slab cache and device array. */
static void _scif_exit(void)
{
	idr_destroy(&scif_ports);
	kmem_cache_destroy(unaligned_cache);
	scif_destroy_scifdev();
}
306
307static int __init scif_init(void)
308{
309 struct miscdevice *mdev = &scif_info.mdev;
310 int rc;
311
312 _scif_init();
313 iova_cache_get();
314 rc = scif_peer_bus_init();
315 if (rc)
316 goto exit;
317 rc = scif_register_driver(&scif_driver);
318 if (rc)
319 goto peer_bus_exit;
320 rc = misc_register(mdev);
321 if (rc)
322 goto unreg_scif;
323 scif_init_debugfs();
324 return 0;
325unreg_scif:
326 scif_unregister_driver(&scif_driver);
327peer_bus_exit:
328 scif_peer_bus_exit();
329exit:
330 _scif_exit();
331 return rc;
332}
333
/* Module exit: unwind scif_init() in reverse order. */
static void __exit scif_exit(void)
{
	scif_exit_debugfs();
	misc_deregister(&scif_info.mdev);
	scif_unregister_driver(&scif_driver);
	scif_peer_bus_exit();
	iova_cache_put();
	_scif_exit();
}
343
/* Module entry/exit points and metadata */
module_init(scif_init);
module_exit(scif_exit);

MODULE_DEVICE_TABLE(scif, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) SCIF driver");
MODULE_LICENSE("GPL v2");