 kernel/events/core.c | +38 -23

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1814,31 +1814,34 @@
 		PERF_EVENT_STATE_INACTIVE;
 }
 
-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+static int __perf_event_read_size(u64 read_format, int nr_siblings)
 {
 	int entry = sizeof(u64); /* value */
 	int size = 0;
 	int nr = 1;
 
-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 		size += sizeof(u64);
 
-	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		size += sizeof(u64);
 
-	if (event->attr.read_format & PERF_FORMAT_ID)
+	if (read_format & PERF_FORMAT_ID)
 		entry += sizeof(u64);
 
-	if (event->attr.read_format & PERF_FORMAT_LOST)
+	if (read_format & PERF_FORMAT_LOST)
 		entry += sizeof(u64);
 
-	if (event->attr.read_format & PERF_FORMAT_GROUP) {
+	if (read_format & PERF_FORMAT_GROUP) {
 		nr += nr_siblings;
 		size += sizeof(u64);
 	}
 
-	size += entry * nr;
-	event->read_size = size;
+	/*
+	 * Since perf_event_validate_size() limits this to 16k and inhibits
+	 * adding more siblings, this will never overflow.
+	 */
+	return size + nr * entry;
 }
 
 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@@ -1891,8 +1888,9 @@
  */
 static void perf_event__header_size(struct perf_event *event)
 {
-	__perf_event_read_size(event,
-			       event->group_leader->nr_siblings);
+	event->read_size =
+		__perf_event_read_size(event->attr.read_format,
+				       event->group_leader->nr_siblings);
 	__perf_event_header_size(event, event->attr.sample_type);
 }
 
@@ -1924,23 +1920,34 @@
 	event->id_header_size = size;
 }
 
+/*
+ * Check that adding an event to the group does not result in anybody
+ * overflowing the 64k event limit imposed by the output buffer.
+ *
+ * Specifically, check that the read_size for the event does not exceed 16k,
+ * read_size being the one term that grows with groups size. Since read_size
+ * depends on per-event read_format, also (re)check the existing events.
+ *
+ * This leaves 48k for the constant size fields and things like callchains,
+ * branch stacks and register sets.
+ */
 static bool perf_event_validate_size(struct perf_event *event)
 {
-	/*
-	 * The values computed here will be over-written when we actually
-	 * attach the event.
-	 */
-	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
-	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
-	perf_event__id_header_size(event);
+	struct perf_event *sibling, *group_leader = event->group_leader;
 
-	/*
-	 * Sum the lot; should not exceed the 64k limit we have on records.
-	 * Conservative limit to allow for callchains and other variable fields.
-	 */
-	if (event->read_size + event->header_size +
-	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+	if (__perf_event_read_size(event->attr.read_format,
+				   group_leader->nr_siblings + 1) > 16*1024)
 		return false;
+
+	if (__perf_event_read_size(group_leader->attr.read_format,
+				   group_leader->nr_siblings + 1) > 16*1024)
+		return false;
+
+	for_each_sibling_event(sibling, group_leader) {
+		if (__perf_event_read_size(sibling->attr.read_format,
+					   group_leader->nr_siblings + 1) > 16*1024)
+			return false;
+	}
 
 	return true;
 }
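
For reference, a minimal userspace sketch that mirrors the size arithmetic of __perf_event_read_size() above (not kernel code; it assumes a <linux/perf_event.h> recent enough to define PERF_FORMAT_LOST). It shows why read_size grows with read_format and the sibling count, and where the 16*1024 cap used by perf_event_validate_size() lands for one example read_format:

/* Illustration only: replicates the read_size arithmetic from the patch. */
#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);	/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_LOST)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;		/* one entry per sibling plus the leader */
		size += sizeof(uint64_t);	/* the nr field itself */
	}
	return size + nr * entry;
}

int main(void)
{
	uint64_t fmt = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_LOST;

	/*
	 * With GROUP|ID|LOST each group member contributes 24 bytes, so the
	 * 16*1024 byte cap is crossed once the group reaches 683 entries
	 * (682 siblings plus the leader): prints 16376 16400.
	 */
	printf("%d %d\n", read_size(fmt, 681), read_size(fmt, 682));
	return 0;
}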