Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
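/*
 * Note: every ID above is a CB3.x (I/OAT v3.0 - v3.3) engine; ioat_pci_probe()
 * below refuses any device whose version register reads below IOAT_VER_3_0.
 */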

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

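/*
 * Slab caches for ring descriptor entries and super extended (SED)
 * descriptors, created in ioat_init_module() below.
 */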
struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static inline bool is_skx_ioat(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

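/*
 * BWD/BDX-DE variants treated as having no RAID (XOR/PQ) support;
 * ioat3_dma_probe() masks those capability bits for these devices.
 */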
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

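	/*
	 * Each label below is both an entry point (selected by the
	 * ioat_interrupt_style module parameter) and the fallback target of
	 * the mode above it: MSI-X -> MSI -> legacy INTx.
	 */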
msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

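	/*
	 * Each channel's 8-byte completion writeback slot is allocated from
	 * this pool in ioat_alloc_chan_resources().
	 */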
	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

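	/*
	 * Anything that is not idle ring space was still outstanding when the
	 * channel went down; those entries sit from the tail onward and are
	 * reclaimed (and dumped for debug) below.
	 */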
	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
				GFP_NOWAIT, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

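	/*
	 * The ring is allocated at IOAT_MAX_ORDER up front; on success this
	 * callback returns 1 << alloc_order as the descriptor count.
	 */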
	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	setup_timer(&ioat_chan->timer, ioat_timer_event, data);
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

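	/*
	 * Each source page was filled with the single-bit pattern
	 * (1 << src_idx), so XOR-ing all IOAT_NUM_SRC_TEST pages must yield
	 * cmp_byte in every byte; cmp_word replicates that across a 32-bit
	 * word for the compare loop after the operation completes.
	 */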
	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma)) {
		err = -ENOMEM;
		goto free_resources;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}

static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
			       IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	int err;
	u16 val16;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

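		/*
		 * No native XOR engine: back the XOR and XOR_VAL operations
		 * with the PQ engine instead (ioat_prep_pqxor*).
		 */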
		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	/* disable relaxed ordering */
	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
	if (err)
		return err;

	/* clear relaxed ordering enable */
	val16 &= ~IOAT_DEVCTRL_ROE;
	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
	if (err)
		return err;

	return 0;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

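	/*
	 * Mark each channel IOAT_CHAN_DOWN under prep_lock, stop its timer,
	 * and reset the hardware; interrupts are masked last.
	 */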
	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

static void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}

#define DRV_NAME "ioatdma"

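/*
 * PCIe AER flow: error_detected() quiesces the device via ioat_shutdown() and
 * asks for a slot reset; slot_reset() re-enables and restores the device; the
 * resume() callback brings the channels back through ioat_resume().
 */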
static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrect error status clear failed: %#x\n", err);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name = DRV_NAME,
	.id_table = ioat_pci_tbl,
	.probe = ioat_pci_probe,
	.remove = ioat_remove,
	.shutdown = ioat_shutdown,
	.err_handler = &ioat_err_handler,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

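	/*
	 * Prefer 64-bit streaming and coherent DMA masks, falling back to
	 * 32-bit; give up on the device if even the 32-bit mask is rejected.
	 */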
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		if (is_skx_ioat(pdev))
			device->version = IOAT_VER_3_2;
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

 err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

 err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);