Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

intel_th: msu: Introduce buffer interface

Introduces a concept of external buffers, which is a mechanism for creating
trace sinks that would receive trace data from MSC buffers and transfer it
elsewhere.

An external buffer can implement its own window allocation/deallocation if
it has to. It must provide a callback that's used to notify it when a
window fills up, so that it can then start a DMA transaction from that
window 'elsewhere'. This window remains in a 'locked' state and won't be
used for storing new trace data until the buffer 'unlocks' it with a
provided API call, at which point the window can be used again for storing
trace data.

This relies on a functional "last block" interrupt, so not all versions of
Trace Hub can use this feature; this does not affect existing users.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://lore.kernel.org/r/20190705141425.19894-2-alexander.shishkin@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Alexander Shishkin and committed by
Greg Kroah-Hartman
615c164d db4ad308

+461 -30
+2 -1
Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
··· 12 12 - "single", for contiguous buffer mode (high-order alloc); 13 13 - "multi", for multiblock mode; 14 14 - "ExI", for DCI handler mode; 15 - - "debug", for debug mode. 15 + - "debug", for debug mode; 16 + - any of the currently loaded buffer sinks. 16 17 If operating mode changes, existing buffer is deallocated, 17 18 provided there are no active users and tracing is not enabled, 18 19 otherwise the write will fail.
+1
MAINTAINERS
··· 8360 8360 S: Supported 8361 8361 F: Documentation/trace/intel_th.rst 8362 8362 F: drivers/hwtracing/intel_th/ 8363 + F: include/linux/intel_th.h 8363 8364 8364 8365 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) 8365 8366 M: Ning Sun <ning.sun@intel.com>
+368 -20
drivers/hwtracing/intel_th/msu.c
··· 17 17 #include <linux/mm.h> 18 18 #include <linux/fs.h> 19 19 #include <linux/io.h> 20 + #include <linux/workqueue.h> 20 21 #include <linux/dma-mapping.h> 21 22 22 23 #ifdef CONFIG_X86 23 24 #include <asm/set_memory.h> 24 25 #endif 25 26 27 + #include <linux/intel_th.h> 26 28 #include "intel_th.h" 27 29 #include "msu.h" 28 30 29 31 #define msc_dev(x) (&(x)->thdev->dev) 30 32 33 + /* 34 + * Lockout state transitions: 35 + * READY -> INUSE -+-> LOCKED -+-> READY -> etc. 36 + * \-----------/ 37 + * WIN_READY: window can be used by HW 38 + * WIN_INUSE: window is in use 39 + * WIN_LOCKED: window is filled up and is being processed by the buffer 40 + * handling code 41 + * 42 + * All state transitions happen automatically, except for the LOCKED->READY, 43 + * which needs to be signalled by the buffer code by calling 44 + * intel_th_msc_window_unlock(). 45 + * 46 + * When the interrupt handler has to switch to the next window, it checks 47 + * whether it's READY, and if it is, it performs the switch and tracing 48 + * continues. If it's LOCKED, it stops the trace. 
49 + */ 50 + enum lockout_state { 51 + WIN_READY = 0, 52 + WIN_INUSE, 53 + WIN_LOCKED 54 + }; 55 + 31 56 /** 32 57 * struct msc_window - multiblock mode window descriptor 33 58 * @entry: window list linkage (msc::win_list) 34 59 * @pgoff: page offset into the buffer that this window starts at 60 + * @lockout: lockout state, see comment below 61 + * @lo_lock: lockout state serialization 35 62 * @nr_blocks: number of blocks (pages) in this window 36 63 * @nr_segs: number of segments in this window (<= @nr_blocks) 37 64 * @_sgt: array of block descriptors ··· 67 40 struct msc_window { 68 41 struct list_head entry; 69 42 unsigned long pgoff; 43 + enum lockout_state lockout; 44 + spinlock_t lo_lock; 70 45 unsigned int nr_blocks; 71 46 unsigned int nr_segs; 72 47 struct msc *msc; ··· 106 77 * struct msc - MSC device representation 107 78 * @reg_base: register window base address 108 79 * @thdev: intel_th_device pointer 80 + * @mbuf: MSU buffer, if assigned 81 + * @mbuf_priv MSU buffer's private data, if @mbuf 109 82 * @win_list: list of windows in multiblock mode 110 83 * @single_sgt: single mode buffer 111 84 * @cur_win: current window ··· 131 100 void __iomem *msu_base; 132 101 struct intel_th_device *thdev; 133 102 103 + const struct msu_buffer *mbuf; 104 + void *mbuf_priv; 105 + 106 + struct work_struct work; 134 107 struct list_head win_list; 135 108 struct sg_table single_sgt; 136 109 struct msc_window *cur_win; ··· 160 125 unsigned int burst_len; 161 126 unsigned int index; 162 127 }; 128 + 129 + static LIST_HEAD(msu_buffer_list); 130 + static struct mutex msu_buffer_mutex; 131 + 132 + /** 133 + * struct msu_buffer_entry - internal MSU buffer bookkeeping 134 + * @entry: link to msu_buffer_list 135 + * @mbuf: MSU buffer object 136 + * @owner: module that provides this MSU buffer 137 + */ 138 + struct msu_buffer_entry { 139 + struct list_head entry; 140 + const struct msu_buffer *mbuf; 141 + struct module *owner; 142 + }; 143 + 144 + static struct msu_buffer_entry 
*__msu_buffer_entry_find(const char *name) 145 + { 146 + struct msu_buffer_entry *mbe; 147 + 148 + lockdep_assert_held(&msu_buffer_mutex); 149 + 150 + list_for_each_entry(mbe, &msu_buffer_list, entry) { 151 + if (!strcmp(mbe->mbuf->name, name)) 152 + return mbe; 153 + } 154 + 155 + return NULL; 156 + } 157 + 158 + static const struct msu_buffer * 159 + msu_buffer_get(const char *name) 160 + { 161 + struct msu_buffer_entry *mbe; 162 + 163 + mutex_lock(&msu_buffer_mutex); 164 + mbe = __msu_buffer_entry_find(name); 165 + if (mbe && !try_module_get(mbe->owner)) 166 + mbe = NULL; 167 + mutex_unlock(&msu_buffer_mutex); 168 + 169 + return mbe ? mbe->mbuf : NULL; 170 + } 171 + 172 + static void msu_buffer_put(const struct msu_buffer *mbuf) 173 + { 174 + struct msu_buffer_entry *mbe; 175 + 176 + mutex_lock(&msu_buffer_mutex); 177 + mbe = __msu_buffer_entry_find(mbuf->name); 178 + if (mbe) 179 + module_put(mbe->owner); 180 + mutex_unlock(&msu_buffer_mutex); 181 + } 182 + 183 + int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, 184 + struct module *owner) 185 + { 186 + struct msu_buffer_entry *mbe; 187 + int ret = 0; 188 + 189 + mbe = kzalloc(sizeof(*mbe), GFP_KERNEL); 190 + if (!mbe) 191 + return -ENOMEM; 192 + 193 + mutex_lock(&msu_buffer_mutex); 194 + if (__msu_buffer_entry_find(mbuf->name)) { 195 + ret = -EEXIST; 196 + kfree(mbe); 197 + goto unlock; 198 + } 199 + 200 + mbe->mbuf = mbuf; 201 + mbe->owner = owner; 202 + list_add_tail(&mbe->entry, &msu_buffer_list); 203 + unlock: 204 + mutex_unlock(&msu_buffer_mutex); 205 + 206 + return ret; 207 + } 208 + EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register); 209 + 210 + void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf) 211 + { 212 + struct msu_buffer_entry *mbe; 213 + 214 + mutex_lock(&msu_buffer_mutex); 215 + mbe = __msu_buffer_entry_find(mbuf->name); 216 + if (mbe) { 217 + list_del(&mbe->entry); 218 + kfree(mbe); 219 + } 220 + mutex_unlock(&msu_buffer_mutex); 221 + } 222 + 
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister); 163 223 164 224 static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) 165 225 { ··· 316 186 entry); 317 187 318 188 return list_next_entry(win, entry); 189 + } 190 + 191 + static size_t msc_win_total_sz(struct msc_window *win) 192 + { 193 + unsigned int blk; 194 + size_t size = 0; 195 + 196 + for (blk = 0; blk < win->nr_segs; blk++) { 197 + struct msc_block_desc *bdesc = msc_win_block(win, blk); 198 + 199 + if (msc_block_wrapped(bdesc)) 200 + return win->nr_blocks << PAGE_SHIFT; 201 + 202 + size += msc_total_sz(bdesc); 203 + if (msc_block_last_written(bdesc)) 204 + break; 205 + } 206 + 207 + return size; 319 208 } 320 209 321 210 /** ··· 676 527 if (!msc->do_irq) 677 528 return 0; 678 529 530 + if (!msc->mbuf) 531 + return 0; 532 + 679 533 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL); 680 534 mintctl |= msc->index ? M1BLIE : M0BLIE; 681 535 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); ··· 706 554 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL); 707 555 } 708 556 557 + static int msc_win_set_lockout(struct msc_window *win, 558 + enum lockout_state expect, 559 + enum lockout_state new) 560 + { 561 + enum lockout_state old; 562 + unsigned long flags; 563 + int ret = 0; 564 + 565 + if (!win->msc->mbuf) 566 + return 0; 567 + 568 + spin_lock_irqsave(&win->lo_lock, flags); 569 + old = win->lockout; 570 + 571 + if (old != expect) { 572 + ret = -EINVAL; 573 + dev_warn_ratelimited(msc_dev(win->msc), 574 + "expected lockout state %d, got %d\n", 575 + expect, old); 576 + goto unlock; 577 + } 578 + 579 + win->lockout = new; 580 + 581 + unlock: 582 + spin_unlock_irqrestore(&win->lo_lock, flags); 583 + 584 + if (ret) { 585 + if (expect == WIN_READY && old == WIN_LOCKED) 586 + return -EBUSY; 587 + 588 + /* from intel_th_msc_window_unlock(), don't warn if not locked */ 589 + if (expect == WIN_LOCKED && old == new) 590 + return 0; 591 + } 592 + 593 + return ret; 594 + } 709 595 /** 710 596 * 
msc_configure() - set up MSC hardware 711 597 * @msc: the MSC device to configure ··· 761 571 if (msc->mode > MSC_MODE_MULTI) 762 572 return -ENOTSUPP; 763 573 764 - if (msc->mode == MSC_MODE_MULTI) 574 + if (msc->mode == MSC_MODE_MULTI) { 575 + if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE)) 576 + return -EBUSY; 577 + 765 578 msc_buffer_clear_hw_header(msc); 579 + } 766 580 767 581 reg = msc->base_addr >> PAGE_SHIFT; 768 582 iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR); ··· 788 594 789 595 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 790 596 597 + intel_th_msu_init(msc); 598 + 791 599 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; 792 600 intel_th_trace_enable(msc->thdev); 793 601 msc->enabled = 1; 794 602 603 + if (msc->mbuf && msc->mbuf->activate) 604 + msc->mbuf->activate(msc->mbuf_priv); 795 605 796 606 return 0; 797 607 } ··· 809 611 */ 810 612 static void msc_disable(struct msc *msc) 811 613 { 614 + struct msc_window *win = msc->cur_win; 812 615 u32 reg; 813 616 814 617 lockdep_assert_held(&msc->buf_mutex); 815 618 619 + if (msc->mode == MSC_MODE_MULTI) 620 + msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); 621 + 622 + if (msc->mbuf && msc->mbuf->deactivate) 623 + msc->mbuf->deactivate(msc->mbuf_priv); 624 + intel_th_msu_deinit(msc); 816 625 intel_th_trace_disable(msc->thdev); 817 626 818 627 if (msc->mode == MSC_MODE_SINGLE) { ··· 835 630 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); 836 631 reg &= ~MSC_EN; 837 632 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 633 + 634 + if (msc->mbuf && msc->mbuf->ready) 635 + msc->mbuf->ready(msc->mbuf_priv, win->sgt, 636 + msc_win_total_sz(win)); 637 + 838 638 msc->enabled = 0; 839 639 840 640 iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); ··· 850 640 851 641 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); 852 642 dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); 643 + 644 + reg = ioread32(msc->reg_base + REG_MSU_MSUSTS); 645 + reg &= msc->index ? 
MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; 646 + iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS); 853 647 } 854 648 855 649 static int intel_th_msc_activate(struct intel_th_device *thdev) ··· 1070 856 1071 857 win->msc = msc; 1072 858 win->sgt = &win->_sgt; 859 + win->lockout = WIN_READY; 860 + spin_lock_init(&win->lo_lock); 1073 861 1074 862 if (!list_empty(&msc->win_list)) { 1075 863 struct msc_window *prev = list_last_entry(&msc->win_list, ··· 1081 865 win->pgoff = prev->pgoff + prev->nr_blocks; 1082 866 } 1083 867 1084 - ret = __msc_buffer_win_alloc(win, nr_blocks); 1085 - if (ret < 0) 868 + if (msc->mbuf && msc->mbuf->alloc_window) 869 + ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt, 870 + nr_blocks << PAGE_SHIFT); 871 + else 872 + ret = __msc_buffer_win_alloc(win, nr_blocks); 873 + 874 + if (ret <= 0) 1086 875 goto err_nomem; 1087 876 1088 877 msc_buffer_set_uc(win, ret); ··· 1146 925 1147 926 msc_buffer_set_wb(win); 1148 927 1149 - __msc_buffer_win_free(msc, win); 928 + if (msc->mbuf && msc->mbuf->free_window) 929 + msc->mbuf->free_window(msc->mbuf_priv, win->sgt); 930 + else 931 + __msc_buffer_win_free(msc, win); 1150 932 1151 933 kfree(win); 1152 934 } ··· 1686 1462 intel_th_trace_switch(msc->thdev); 1687 1463 } 1688 1464 1465 + /** 1466 + * intel_th_msc_window_unlock - put the window back in rotation 1467 + * @dev: MSC device to which this relates 1468 + * @sgt: buffer's sg_table for the window, does nothing if NULL 1469 + */ 1470 + void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt) 1471 + { 1472 + struct msc *msc = dev_get_drvdata(dev); 1473 + struct msc_window *win; 1474 + 1475 + if (!sgt) 1476 + return; 1477 + 1478 + win = msc_find_window(msc, sgt, false); 1479 + if (!win) 1480 + return; 1481 + 1482 + msc_win_set_lockout(win, WIN_LOCKED, WIN_READY); 1483 + } 1484 + EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock); 1485 + 1486 + static void msc_work(struct work_struct *work) 1487 + { 1488 + struct msc *msc = container_of(work, 
struct msc, work); 1489 + 1490 + intel_th_msc_deactivate(msc->thdev); 1491 + } 1492 + 1689 1493 static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev) 1690 1494 { 1691 1495 struct msc *msc = dev_get_drvdata(&thdev->dev); 1692 1496 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS); 1693 1497 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST; 1498 + struct msc_window *win, *next_win; 1694 1499 1695 - if (!(msusts & mask)) { 1696 - if (msc->enabled) 1697 - return IRQ_HANDLED; 1500 + if (!msc->do_irq || !msc->mbuf) 1698 1501 return IRQ_NONE; 1502 + 1503 + msusts &= mask; 1504 + 1505 + if (!msusts) 1506 + return msc->enabled ? IRQ_HANDLED : IRQ_NONE; 1507 + 1508 + iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS); 1509 + 1510 + if (!msc->enabled) 1511 + return IRQ_NONE; 1512 + 1513 + /* grab the window before we do the switch */ 1514 + win = msc->cur_win; 1515 + if (!win) 1516 + return IRQ_HANDLED; 1517 + next_win = msc_next_window(win); 1518 + if (!next_win) 1519 + return IRQ_HANDLED; 1520 + 1521 + /* next window: if READY, proceed, if LOCKED, stop the trace */ 1522 + if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) { 1523 + schedule_work(&msc->work); 1524 + return IRQ_HANDLED; 1699 1525 } 1526 + 1527 + /* current window: INUSE -> LOCKED */ 1528 + msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED); 1529 + 1530 + msc_win_switch(msc); 1531 + 1532 + if (msc->mbuf && msc->mbuf->ready) 1533 + msc->mbuf->ready(msc->mbuf_priv, win->sgt, 1534 + msc_win_total_sz(win)); 1700 1535 1701 1536 return IRQ_HANDLED; 1702 1537 } ··· 1794 1511 1795 1512 static DEVICE_ATTR_RW(wrap); 1796 1513 1514 + static void msc_buffer_unassign(struct msc *msc) 1515 + { 1516 + lockdep_assert_held(&msc->buf_mutex); 1517 + 1518 + if (!msc->mbuf) 1519 + return; 1520 + 1521 + msc->mbuf->unassign(msc->mbuf_priv); 1522 + msu_buffer_put(msc->mbuf); 1523 + msc->mbuf_priv = NULL; 1524 + msc->mbuf = NULL; 1525 + } 1526 + 1797 1527 static ssize_t 1798 1528 mode_show(struct 
device *dev, struct device_attribute *attr, char *buf) 1799 1529 { 1800 1530 struct msc *msc = dev_get_drvdata(dev); 1531 + const char *mode = msc_mode[msc->mode]; 1532 + ssize_t ret; 1801 1533 1802 - return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]); 1534 + mutex_lock(&msc->buf_mutex); 1535 + if (msc->mbuf) 1536 + mode = msc->mbuf->name; 1537 + ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode); 1538 + mutex_unlock(&msc->buf_mutex); 1539 + 1540 + return ret; 1803 1541 } 1804 1542 1805 1543 static ssize_t 1806 1544 mode_store(struct device *dev, struct device_attribute *attr, const char *buf, 1807 1545 size_t size) 1808 1546 { 1547 + const struct msu_buffer *mbuf = NULL; 1809 1548 struct msc *msc = dev_get_drvdata(dev); 1810 1549 size_t len = size; 1811 - char *cp; 1550 + char *cp, *mode; 1812 1551 int i, ret; 1813 1552 1814 1553 if (!capable(CAP_SYS_RAWIO)) ··· 1840 1535 if (cp) 1841 1536 len = cp - buf; 1842 1537 1843 - for (i = 0; i < ARRAY_SIZE(msc_mode); i++) 1844 - if (!strncmp(msc_mode[i], buf, len)) 1845 - goto found; 1538 + mode = kstrndup(buf, len, GFP_KERNEL); 1539 + i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode); 1540 + if (i >= 0) 1541 + goto found; 1542 + 1543 + /* Buffer sinks only work with a usable IRQ */ 1544 + if (!msc->do_irq) { 1545 + kfree(mode); 1546 + return -EINVAL; 1547 + } 1548 + 1549 + mbuf = msu_buffer_get(mode); 1550 + kfree(mode); 1551 + if (mbuf) 1552 + goto found; 1846 1553 1847 1554 return -EINVAL; 1848 1555 1849 1556 found: 1850 1557 mutex_lock(&msc->buf_mutex); 1558 + ret = 0; 1559 + 1560 + /* Same buffer: do nothing */ 1561 + if (mbuf && mbuf == msc->mbuf) { 1562 + /* put the extra reference we just got */ 1563 + msu_buffer_put(mbuf); 1564 + goto unlock; 1565 + } 1566 + 1851 1567 ret = msc_buffer_unlocked_free_unless_used(msc); 1852 - if (!ret) 1853 - msc->mode = i; 1568 + if (ret) 1569 + goto unlock; 1570 + 1571 + if (mbuf) { 1572 + void *mbuf_priv = mbuf->assign(dev, &i); 1573 + 1574 + if (!mbuf_priv) { 1575 + 
ret = -ENOMEM; 1576 + goto unlock; 1577 + } 1578 + 1579 + msc_buffer_unassign(msc); 1580 + msc->mbuf_priv = mbuf_priv; 1581 + msc->mbuf = mbuf; 1582 + } else { 1583 + msc_buffer_unassign(msc); 1584 + } 1585 + 1586 + msc->mode = i; 1587 + 1588 + unlock: 1589 + if (ret && mbuf) 1590 + msu_buffer_put(mbuf); 1854 1591 mutex_unlock(&msc->buf_mutex); 1855 1592 1856 1593 return ret ? ret : size; ··· 2014 1667 return -EINVAL; 2015 1668 2016 1669 mutex_lock(&msc->buf_mutex); 2017 - if (msc->mode != MSC_MODE_MULTI) 1670 + /* 1671 + * Window switch can only happen in the "multi" mode. 1672 + * If a external buffer is engaged, they have the full 1673 + * control over window switching. 1674 + */ 1675 + if (msc->mode != MSC_MODE_MULTI || msc->mbuf) 2018 1676 ret = -ENOTSUPP; 2019 1677 else 2020 1678 msc_win_switch(msc); ··· 2072 1720 msc->reg_base = base + msc->index * 0x100; 2073 1721 msc->msu_base = base; 2074 1722 2075 - err = intel_th_msu_init(msc); 2076 - if (err) 2077 - return err; 2078 - 1723 + INIT_WORK(&msc->work, msc_work); 2079 1724 err = intel_th_msc_init(msc); 2080 1725 if (err) 2081 1726 return err; ··· 2088 1739 int ret; 2089 1740 2090 1741 intel_th_msc_deactivate(thdev); 2091 - intel_th_msu_deinit(msc); 2092 1742 2093 1743 /* 2094 1744 * Buffers should not be used at this point except if the
+11 -9
drivers/hwtracing/intel_th/msu.h
··· 44 44 #define M0BLIE BIT(16) 45 45 #define M1BLIE BIT(24) 46 46 47 - /* MSC operating modes (MSC_MODE) */ 48 - enum { 49 - MSC_MODE_SINGLE = 0, 50 - MSC_MODE_MULTI, 51 - MSC_MODE_EXI, 52 - MSC_MODE_DEBUG, 53 - }; 54 - 55 47 /* MSCnSTS bits */ 56 48 #define MSCSTS_WRAPSTAT BIT(1) /* Wrap occurred */ 57 49 #define MSCSTS_PLE BIT(2) /* Pipeline Empty */ ··· 85 93 return bdesc->valid_dw * 4 - MSC_BDESC; 86 94 } 87 95 96 + static inline unsigned long msc_total_sz(struct msc_block_desc *bdesc) 97 + { 98 + return bdesc->valid_dw * 4; 99 + } 100 + 101 + static inline unsigned long msc_block_sz(struct msc_block_desc *bdesc) 102 + { 103 + return bdesc->block_sz * 64 - MSC_BDESC; 104 + } 105 + 88 106 static inline bool msc_block_wrapped(struct msc_block_desc *bdesc) 89 107 { 90 108 if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP)) ··· 106 104 static inline bool msc_block_last_written(struct msc_block_desc *bdesc) 107 105 { 108 106 if ((bdesc->hw_tag & MSC_HW_TAG_ENDBIT) || 109 - (msc_data_sz(bdesc) != DATA_IN_PAGE)) 107 + (msc_data_sz(bdesc) != msc_block_sz(bdesc))) 110 108 return true; 111 109 112 110 return false;
+79
include/linux/intel_th.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Intel(R) Trace Hub data structures for implementing buffer sinks. 4 + * 5 + * Copyright (C) 2019 Intel Corporation. 6 + */ 7 + 8 + #ifndef _INTEL_TH_H_ 9 + #define _INTEL_TH_H_ 10 + 11 + #include <linux/scatterlist.h> 12 + 13 + /* MSC operating modes (MSC_MODE) */ 14 + enum { 15 + MSC_MODE_SINGLE = 0, 16 + MSC_MODE_MULTI, 17 + MSC_MODE_EXI, 18 + MSC_MODE_DEBUG, 19 + }; 20 + 21 + struct msu_buffer { 22 + const char *name; 23 + /* 24 + * ->assign() called when buffer 'mode' is set to this driver 25 + * (aka mode_store()) 26 + * @device: struct device * of the msc 27 + * @mode: allows the driver to set HW mode (see the enum above) 28 + * Returns: a pointer to a private structure associated with this 29 + * msc or NULL in case of error. This private structure 30 + * will then be passed into all other callbacks. 31 + */ 32 + void *(*assign)(struct device *dev, int *mode); 33 + /* ->unassign(): some other mode is selected, clean up */ 34 + void (*unassign)(void *priv); 35 + /* 36 + * ->alloc_window(): allocate memory for the window of a given 37 + * size 38 + * @sgt: pointer to sg_table, can be overridden by the buffer 39 + * driver, or kept intact 40 + * Returns: number of sg table entries <= number of pages; 41 + * 0 is treated as an allocation failure. 42 + */ 43 + int (*alloc_window)(void *priv, struct sg_table **sgt, 44 + size_t size); 45 + void (*free_window)(void *priv, struct sg_table *sgt); 46 + /* ->activate(): trace has started */ 47 + void (*activate)(void *priv); 48 + /* ->deactivate(): trace is about to stop */ 49 + void (*deactivate)(void *priv); 50 + /* 51 + * ->ready(): window @sgt is filled up to the last block OR 52 + * tracing is stopped by the user; this window contains 53 + * @bytes data. The window in question transitions into 54 + * the "LOCKED" state, indicating that it can't be used 55 + * by hardware. 
To clear this state and make the window 56 + * available to the hardware again, call 57 + * intel_th_msc_window_unlock(). 58 + */ 59 + int (*ready)(void *priv, struct sg_table *sgt, size_t bytes); 60 + }; 61 + 62 + int intel_th_msu_buffer_register(const struct msu_buffer *mbuf, 63 + struct module *owner); 64 + void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf); 65 + void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt); 66 + 67 + #define module_intel_th_msu_buffer(__buffer) \ 68 + static int __init __buffer##_init(void) \ 69 + { \ 70 + return intel_th_msu_buffer_register(&(__buffer), THIS_MODULE); \ 71 + } \ 72 + module_init(__buffer##_init); \ 73 + static void __exit __buffer##_exit(void) \ 74 + { \ 75 + intel_th_msu_buffer_unregister(&(__buffer)); \ 76 + } \ 77 + module_exit(__buffer##_exit); 78 + 79 + #endif /* _INTEL_TH_H_ */