Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/mic_x100_dma.c at v4.16-rc1

/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD	(1 * 1024 * 1024 -\
					 MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST	(1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT		60
#define MIC_DMA_MEMCPY_LEN_SHIFT	46
#define MIC_DMA_STAT_INTR_SHIFT		59

/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
	MIC_DMA_MEMCPY = 1,
	MIC_DMA_STATUS,
};

static inline u32 mic_dma_hw_ring_inc(u32 val)
{
	return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

static inline u32 mic_dma_hw_ring_dec(u32 val)
{
	return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
	ch->head = mic_dma_hw_ring_inc(ch->head);
}

/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
	dma_addr_t src_phys, dma_addr_t dst_phys, u64 size)
{
	u64 qw0, qw1;

	qw0 = src_phys;
	qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
	qw1 = MIC_DMA_MEMCPY;
	qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
	qw1 |= dst_phys;
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
	dma_addr_t dst_phys, bool generate_intr)
{
	u64 qw0, qw1;

	qw0 = data;
	qw1 = (u64) MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
	if (generate_intr)
		qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

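/*
 * Worked example of the two-quadword descriptor encoding built above,
 * derived from the shift constants (the struct layout itself lives in
 * mic_x100_dma.h):
 *
 *   memcpy desc:	qw0 = src_phys | (size >> MIC_DMA_ALIGN_SHIFT) << 46
 *			qw1 = ((u64)MIC_DMA_MEMCPY << 60) | dst_phys
 *   status desc:	qw0 = the 64-bit data to be written
 *			qw1 = ((u64)MIC_DMA_STATUS << 60) | dst_phys,
 *			      optionally OR'd with (1ULL << 59) to raise an
 *			      interrupt when the write completes
 *
 * That is, bits 63:60 of qw1 carry the descriptor type, bit 59 is the
 * status-interrupt flag, and the memcpy length is stored in
 * MIC_DMA_ALIGN_BYTES units starting at bit 46 of qw0.
 */
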
static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	u32 tail;
	u32 last_tail;

	spin_lock(&ch->cleanup_lock);
	tail = mic_dma_read_cmp_cnt(ch);
	/*
	 * This is the barrier pair for smp_wmb() in fn.
	 * mic_dma_tx_submit_unlock. It's required so that we read the
	 * updated cookie value from tx->cookie.
	 */
	smp_rmb();
	for (last_tail = ch->last_tail; tail != last_tail;) {
		tx = &ch->tx_array[last_tail];
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
		}
		last_tail = mic_dma_hw_ring_inc(last_tail);
	}
	/* finish all completion callbacks before incrementing tail */
	smp_mb();
	ch->last_tail = last_tail;
	spin_unlock(&ch->cleanup_lock);
}

static u32 mic_dma_ring_count(u32 head, u32 tail)
{
	u32 count;

	if (head >= tail)
		count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head);
	else
		count = tail - head;
	return count - 1;
}

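/*
 * Illustrative arithmetic for mic_dma_ring_count() above (the real
 * MIC_DMA_DESC_RX_SIZE comes from mic_x100_dma.h; a 128-entry ring is
 * assumed here purely for the example): with head == 10 and tail == 4,
 * the free space is (4 - 0) + (128 - 10) - 1 == 121 slots. The final
 * "- 1" keeps one slot permanently unused so that a full ring (head one
 * behind tail, zero free slots) can be distinguished from an empty one
 * (head == tail).
 */
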
/* Returns the num. of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
	struct device *dev = mic_dma_ch_to_device(ch);
	u32 count;

	count = mic_dma_ring_count(ch->head, ch->last_tail);
	if (count < required) {
		mic_dma_cleanup(ch);
		count = mic_dma_ring_count(ch->head, ch->last_tail);
	}

	if (count < required) {
		dev_dbg(dev, "Not enough desc space");
		dev_dbg(dev, "%s %d required=%u, avail=%u\n",
			__func__, __LINE__, required, count);
		return -ENOMEM;
	} else {
		return count;
	}
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr*/
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
				    dma_addr_t dst, size_t len)
{
	size_t current_transfer_len;
	size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
	/* 3 is added to make sure we have enough space for status desc */
	int num_desc = len / max_xfer_size + 3;
	int ret;

	if (len % max_xfer_size)
		num_desc++;

	ret = mic_dma_avail_desc_ring_space(ch, num_desc);
	if (ret < 0)
		return ret;
	do {
		current_transfer_len = min(len, max_xfer_size);
		mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
				    src, dst, current_transfer_len);
		mic_dma_hw_ring_inc_head(ch);
		len -= current_transfer_len;
		dst = dst + current_transfer_len;
		src = src + current_transfer_len;
	} while (len > 0);
	return 0;
}

/* It's a h/w quirk and h/w needs 2 status descriptors for every status desc */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, false);
	mic_dma_hw_ring_inc_head(ch);
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, true);
	mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
			  dma_addr_t dst, size_t len)
{
	if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) {
		return -ENOMEM;
	} else {
		/* 3 is the maximum number of status descriptors */
		int ret = mic_dma_avail_desc_ring_space(ch, 3);

		if (ret < 0)
			return ret;
	}

	/* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
	if (flags & DMA_PREP_FENCE) {
		mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
					 ch->status_dest_micpa, false);
		mic_dma_hw_ring_inc_head(ch);
	}

	if (flags & DMA_PREP_INTERRUPT)
		mic_dma_prog_intr(ch);

	return 0;
}

static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	spin_lock(&mic_ch->issue_lock);
	/*
	 * Write to head triggers h/w to act on the descriptors.
	 * On MIC, writing the same head value twice causes
	 * a h/w error. On second write, h/w assumes we filled
	 * the entire ring & overwrote some of the descriptors.
	 */
	if (mic_ch->issued == mic_ch->submitted)
		goto out;
	mic_ch->issued = mic_ch->submitted;
	/*
	 * make descriptor updates visible before advancing head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();
	mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
	spin_unlock(&mic_ch->issue_lock);
}

static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
	if (mic_dma_ring_count(ch->issued, ch->submitted)
			> mic_dma_pending_level)
		mic_dma_issue_pending(&ch->api_ch);
}

static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
	dma_cookie_t cookie;

	dma_cookie_assign(tx);
	cookie = tx->cookie;
	/*
	 * We need an smp write barrier here because another CPU might see
	 * an update to submitted and update h/w head even before we
	 * assigned a cookie to this tx.
	 */
	smp_wmb();
	mic_ch->submitted = mic_ch->head;
	spin_unlock(&mic_ch->prep_lock);
	mic_dma_update_pending(mic_ch);
	return cookie;
}

static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
	u32 idx = mic_dma_hw_ring_dec(ch->head);
	struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

	dma_async_tx_descriptor_init(tx, &ch->api_ch);
	tx->tx_submit = mic_dma_tx_submit_unlock;
	return tx;
}

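/*
 * Locking convention for the prep handlers below: each *_lock() entry
 * point returns to its caller with prep_lock still held on success; the
 * lock is dropped either on the error path or in
 * mic_dma_tx_submit_unlock(), which allocate_tx() above installs as the
 * tx_submit hook. A sketch of the resulting dmaengine client sequence
 * (hypothetical client code, not part of this file):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = dmaengine_submit(tx);    (releases prep_lock via tx_submit)
 *	dma_async_issue_pending(chan);
 *
 * so descriptor programming and cookie assignment form a single
 * critical section per transaction.
 */
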
/* Program a status descriptor with dst as address and value to be written */
static struct dma_async_tx_descriptor *
mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
			 unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int result;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_avail_desc_ring_space(mic_ch, 4);
	if (result < 0)
		goto error;
	mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
				 false);
	mic_dma_hw_ring_inc_head(mic_ch);
	result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (result < 0)
		goto error;

	return allocate_tx(mic_ch);
error:
	dev_err(mic_dma_ch_to_device(mic_ch),
		"Error enqueueing dma status descriptor, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

/*
 * Prepare a memcpy descriptor to be added to the ring.
 * Note that a temporary descriptor would add the extra overhead of copying
 * the descriptor to the ring. So, we copy directly to the descriptor ring.
 */
static struct dma_async_tx_descriptor *
mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	struct device *dev = mic_dma_ch_to_device(mic_ch);
	int result;

	if (!len && !flags)
		return NULL;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
	if (result >= 0)
		return allocate_tx(mic_ch);
	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

static struct dma_async_tx_descriptor *
mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int ret;

	spin_lock(&mic_ch->prep_lock);
	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (!ret)
		return allocate_tx(mic_ch);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

/* Return the status of the transaction */
static enum dma_status
mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	if (DMA_COMPLETE != dma_cookie_status(ch, cookie, txstate))
		mic_dma_cleanup(mic_ch);

	return dma_cookie_status(ch, cookie, txstate);
}

static irqreturn_t mic_dma_thread_fn(int irq, void *data)
{
	mic_dma_cleanup((struct mic_dma_chan *)data);
	return IRQ_HANDLED;
}

static irqreturn_t mic_dma_intr_handler(int irq, void *data)
{
	struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);

	mic_dma_ack_interrupt(ch);
	return IRQ_WAKE_THREAD;
}

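/*
 * Interrupt handling above follows the standard threaded-IRQ split:
 * mic_dma_intr_handler() runs in hard-irq context, only acks the channel
 * interrupt, and returns IRQ_WAKE_THREAD, which defers the completion
 * walk in mic_dma_cleanup() to mic_dma_thread_fn() in process context.
 */
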
static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
	struct device *dev = &to_mbus_device(ch)->dev;

	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);

	if (!ch->desc_ring)
		return -ENOMEM;

	ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
					     desc_ring_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->desc_ring_micpa))
		goto map_error;

	ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
	if (!ch->tx_array)
		goto tx_error;
	return 0;
tx_error:
	dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
			 DMA_BIDIRECTIONAL);
map_error:
	kfree(ch->desc_ring);
	return -ENOMEM;
}

static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);

	vfree(ch->tx_array);
	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
			 desc_ring_size, DMA_BIDIRECTIONAL);
	kfree(ch->desc_ring);
	ch->desc_ring = NULL;
}

static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
{
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
			 L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
	kfree(ch->status_dest);
}

static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
{
	struct device *dev = &to_mbus_device(ch)->dev;

	ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
	if (!ch->status_dest)
		return -ENOMEM;
	ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
					       L1_CACHE_BYTES,
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->status_dest_micpa)) {
		kfree(ch->status_dest);
		ch->status_dest = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int mic_dma_check_chan(struct mic_dma_chan *ch)
{
	if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
	    mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
		mic_dma_disable_chan(ch);
		mic_dma_chan_mask_intr(ch);
		dev_err(mic_dma_ch_to_device(ch),
			"%s %d error setting up mic dma chan %d\n",
			__func__, __LINE__, ch->ch_num);
		return -EBUSY;
	}
	return 0;
}

static int mic_dma_chan_setup(struct mic_dma_chan *ch)
{
	if (MIC_DMA_CHAN_MIC == ch->owner)
		mic_dma_chan_set_owner(ch);
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
	mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
	mic_dma_chan_set_desc_ring(ch);
	ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
	ch->head = ch->last_tail;
	ch->issued = 0;
	mic_dma_chan_unmask_intr(ch);
	mic_dma_enable_chan(ch);
	return mic_dma_check_chan(ch);
}

static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
}

static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
{
	dma_async_device_unregister(&mic_dma_dev->dma_dev);
}

static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
	ch->cookie =
		to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
			mic_dma_intr_handler, mic_dma_thread_fn,
			"mic dma_channel", ch, ch->ch_num);
	return PTR_ERR_OR_ZERO(ch->cookie);
}

static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
{
	to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
}

static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
	int ret = mic_dma_alloc_desc_ring(ch);

	if (ret)
		goto ring_error;
	ret = mic_dma_alloc_status_dest(ch);
	if (ret)
		goto status_error;
	ret = mic_dma_chan_setup(ch);
	if (ret)
		goto chan_error;
	return ret;
chan_error:
	mic_dma_free_status_dest(ch);
status_error:
	mic_dma_free_desc_ring(ch);
ring_error:
	return ret;
}

static int mic_dma_drain_chan(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	dma_cookie_t cookie;

	tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		err = -ENOMEM;
	else
		err = dma_sync_wait(&ch->api_ch, cookie);
	if (err) {
		dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
			__func__, __LINE__, ch->ch_num);
		err = -EIO;
	}
error:
	mic_dma_cleanup(ch);
	return err;
}

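/*
 * Note on the drain above: a zero-length memcpy with DMA_PREP_FENCE set
 * programs no data-movement descriptors, only the fence status
 * descriptor, so waiting on its cookie with dma_sync_wait() acts as a
 * flush marker. Because the ring is processed in order, completion of
 * the fence implies every previously submitted descriptor has retired.
 */
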
static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
	mic_dma_chan_destroy(ch);
	mic_dma_cleanup(ch);
	mic_dma_free_status_dest(ch);
	mic_dma_free_desc_ring(ch);
}

static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
			enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;
	int ret;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		ch->ch_num = i;
		ch->owner = owner;
		spin_lock_init(&ch->cleanup_lock);
		spin_lock_init(&ch->prep_lock);
		spin_lock_init(&ch->issue_lock);
		ret = mic_dma_setup_irq(ch);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* free the irqs of the channels that were set up successfully */
	for (i = i - 1; i >= first_chan; i--)
		mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
	return ret;
}

static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		mic_dma_free_irq(ch);
	}
}

static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
{
	int ret = mic_dma_chan_init(to_mic_dma_chan(ch));

	if (ret)
		return ret;
	return MIC_DMA_DESC_RX_SIZE;
}

static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	mic_dma_drain_chan(mic_ch);
	mic_dma_chan_uninit(mic_ch);
}

/* Set the fn. handlers and register the dma device with dma api */
static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
				       enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;

	dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
	/*
	 * This dma engine is not capable of host memory to host memory
	 * transfers
	 */
	dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);

	if (MIC_DMA_CHAN_HOST == owner)
		dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
	mic_dma_dev->dma_dev.device_alloc_chan_resources =
		mic_dma_alloc_chan_resources;
	mic_dma_dev->dma_dev.device_free_chan_resources =
		mic_dma_free_chan_resources;
	mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
	mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
	mic_dma_dev->dma_dev.device_prep_dma_imm_data =
		mic_dma_prep_status_lock;
	mic_dma_dev->dma_dev.device_prep_dma_interrupt =
		mic_dma_prep_interrupt_lock;
	mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
	mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
	INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
		dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
		list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
			      &mic_dma_dev->dma_dev.channels);
	}
	return dma_async_device_register(&mic_dma_dev->dma_dev);
}

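/*
 * Because DMA_PRIVATE is set for host-owned channels above, they are
 * excluded from the dmaengine core's general-purpose allocation and must
 * be requested explicitly. A hedged sketch of such a request
 * (hypothetical client code; the filter function and its parameter are
 * placeholders, not part of this driver):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 */
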
/*
 * Initializes dma channels and registers the dma device with the
 * dma engine api.
 */
static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
					      enum mic_dma_chan_owner owner)
{
	struct mic_dma_device *mic_dma_dev;
	int ret;
	struct device *dev = &mbdev->dev;

	mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
	if (!mic_dma_dev) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	mic_dma_dev->mbdev = mbdev;
	mic_dma_dev->dma_dev.dev = dev;
	mic_dma_dev->mmio = mbdev->mmio_va;
	if (MIC_DMA_CHAN_HOST == owner) {
		mic_dma_dev->start_ch = 0;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
	} else {
		mic_dma_dev->start_ch = 4;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
	}
	ret = mic_dma_init(mic_dma_dev, owner);
	if (ret)
		goto init_error;
	ret = mic_dma_register_dma_device(mic_dma_dev, owner);
	if (ret)
		goto reg_error;
	return mic_dma_dev;
reg_error:
	mic_dma_uninit(mic_dma_dev);
init_error:
	kfree(mic_dma_dev);
	mic_dma_dev = NULL;
alloc_error:
	dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
	return mic_dma_dev;
}

static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
	mic_dma_unregister_dma_device(mic_dma_dev);
	mic_dma_uninit(mic_dma_dev);
	kfree(mic_dma_dev);
}

/* DEBUGFS CODE */
static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
{
	struct mic_dma_device *mic_dma_dev = s->private;
	int i, chan_num, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	seq_printf(s, "SBOX_DCR: %#x\n",
		   mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
				     MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
	seq_puts(s, "DMA Channel Registers\n");
	seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
		   "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
	seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		chan_num = ch->ch_num;
		seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
			   chan_num,
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
		seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
	}
	return 0;
}

static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_dma_reg_seq_show, inode->i_private);
}

static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}

static const struct file_operations mic_dma_reg_ops = {
	.owner = THIS_MODULE,
	.open = mic_dma_reg_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = mic_dma_reg_debug_release
};

/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;

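/*
 * Driver lifecycle: probe below registers the DMA device first and only
 * then creates the per-device debugfs entries under the module-wide
 * parent directory; remove tears the debugfs entries down before
 * unregistering, i.e. the reverse of probe order.
 */
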
static int mic_dma_driver_probe(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;
	enum mic_dma_chan_owner owner;

	if (MBUS_DEV_DMA_MIC == mbdev->id.device)
		owner = MIC_DMA_CHAN_MIC;
	else
		owner = MIC_DMA_CHAN_HOST;

	mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
	if (!mic_dma_dev)
		return -ENODEV;
	dev_set_drvdata(&mbdev->dev, mic_dma_dev);

	if (mic_dma_dbg) {
		mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
							  mic_dma_dbg);
		if (mic_dma_dev->dbg_dir)
			debugfs_create_file("mic_dma_reg", 0444,
					    mic_dma_dev->dbg_dir, mic_dma_dev,
					    &mic_dma_reg_ops);
	}
	return 0;
}

static void mic_dma_driver_remove(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;

	mic_dma_dev = dev_get_drvdata(&mbdev->dev);
	debugfs_remove_recursive(mic_dma_dev->dbg_dir);
	mic_dma_dev_unreg(mic_dma_dev);
}

static struct mbus_device_id id_table[] = {
	{MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
	{MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
	{0},
};

static struct mbus_driver mic_dma_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = mic_dma_driver_probe,
	.remove = mic_dma_driver_remove,
};

static int __init mic_x100_dma_init(void)
{
	int rc = mbus_register_driver(&mic_dma_driver);

	if (rc)
		return rc;
	mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
	return 0;
}

static void __exit mic_x100_dma_exit(void)
{
	debugfs_remove_recursive(mic_dma_dbg);
	mbus_unregister_driver(&mic_dma_driver);
}

module_init(mic_x100_dma_init);
module_exit(mic_x100_dma_exit);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
MODULE_LICENSE("GPL v2");