Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kcov: support comparison operands collection

Enables kcov to collect comparison operands from instrumented code.
This is done by using Clang's -fsanitize=trace-cmp instrumentation
(currently not available for GCC).

The comparison operands help a lot in fuzz testing. E.g. they are used
in Syzkaller to cover the interiors of conditional statements with far
fewer attempts and thus make previously unreachable code reachable.

To allow separate collection of coverage and comparison operands, two
different work modes are implemented. Mode selection is now done via a
KCOV_ENABLE ioctl call with the corresponding argument value.

Link: http://lkml.kernel.org/r/20171011095459.70721-1-glider@google.com
Signed-off-by: Victor Chibotaru <tchibo@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: <syzkaller@googlegroups.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Victor Chibotaru and committed by
Linus Torvalds
ded97d2c fcf4edac

+213 -41
+8 -4
include/linux/kcov.h
··· 8 8 9 9 #ifdef CONFIG_KCOV 10 10 11 - void kcov_task_init(struct task_struct *t); 12 - void kcov_task_exit(struct task_struct *t); 13 - 14 11 enum kcov_mode { 15 12 /* Coverage collection is not enabled yet. */ 16 13 KCOV_MODE_DISABLED = 0, 14 + /* KCOV was initialized, but tracing mode hasn't been chosen yet. */ 15 + KCOV_MODE_INIT = 1, 17 16 /* 18 17 * Tracing coverage collection mode. 19 18 * Covered PCs are collected in a per-task buffer. 20 19 */ 21 - KCOV_MODE_TRACE = 1, 20 + KCOV_MODE_TRACE_PC = 2, 21 + /* Collecting comparison operands mode. */ 22 + KCOV_MODE_TRACE_CMP = 3, 22 23 }; 24 + 25 + void kcov_task_init(struct task_struct *t); 26 + void kcov_task_exit(struct task_struct *t); 23 27 24 28 #else 25 29
+24
include/uapi/linux/kcov.h
··· 8 8 #define KCOV_ENABLE _IO('c', 100) 9 9 #define KCOV_DISABLE _IO('c', 101) 10 10 11 + enum { 12 + /* 13 + * Tracing coverage collection mode. 14 + * Covered PCs are collected in a per-task buffer. 15 + * In new KCOV version the mode is chosen by calling 16 + * ioctl(fd, KCOV_ENABLE, mode). In older versions the mode argument 17 + * was supposed to be 0 in such a call. So, for reasons of backward 18 + * compatibility, we have chosen the value KCOV_TRACE_PC to be 0. 19 + */ 20 + KCOV_TRACE_PC = 0, 21 + /* Collecting comparison operands mode. */ 22 + KCOV_TRACE_CMP = 1, 23 + }; 24 + 25 + /* 26 + * The format for the types of collected comparisons. 27 + * 28 + * Bit 0 shows whether one of the arguments is a compile-time constant. 29 + * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes. 30 + */ 31 + #define KCOV_CMP_CONST (1 << 0) 32 + #define KCOV_CMP_SIZE(n) ((n) << 1) 33 + #define KCOV_CMP_MASK KCOV_CMP_SIZE(3) 34 + 11 35 #endif /* _LINUX_KCOV_IOCTLS_H */
+181 -37
kernel/kcov.c
··· 22 22 #include <linux/kcov.h> 23 23 #include <asm/setup.h> 24 24 25 + /* Number of 64-bit words written per one comparison: */ 26 + #define KCOV_WORDS_PER_CMP 4 27 + 25 28 /* 26 29 * kcov descriptor (one per opened debugfs file). 27 30 * State transitions of the descriptor: 28 31 * - initial state after open() 29 32 * - then there must be a single ioctl(KCOV_INIT_TRACE) call 30 33 * - then, mmap() call (several calls are allowed but not useful) 31 - * - then, repeated enable/disable for a task (only one task a time allowed) 34 + * - then, ioctl(KCOV_ENABLE, arg), where arg is 35 + * KCOV_TRACE_PC - to trace only the PCs 36 + * or 37 + * KCOV_TRACE_CMP - to trace only the comparison operands 38 + * - then, ioctl(KCOV_DISABLE) to disable the task. 39 + * Enabling/disabling ioctls can be repeated (only one task a time allowed). 32 40 */ 33 41 struct kcov { 34 42 /* ··· 56 48 struct task_struct *t; 57 49 }; 58 50 51 + static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) 52 + { 53 + enum kcov_mode mode; 54 + 55 + /* 56 + * We are interested in code coverage as a function of a syscall inputs, 57 + * so we ignore code executed in interrupts. 58 + */ 59 + if (!in_task()) 60 + return false; 61 + mode = READ_ONCE(t->kcov_mode); 62 + /* 63 + * There is some code that runs in interrupts but for which 64 + * in_interrupt() returns false (e.g. preempt_schedule_irq()). 65 + * READ_ONCE()/barrier() effectively provides load-acquire wrt 66 + * interrupts, there are paired barrier()/WRITE_ONCE() in 67 + * kcov_ioctl_locked(). 68 + */ 69 + barrier(); 70 + return mode == needed_mode; 71 + } 72 + 73 + static unsigned long canonicalize_ip(unsigned long ip) 74 + { 75 + #ifdef CONFIG_RANDOMIZE_BASE 76 + ip -= kaslr_offset(); 77 + #endif 78 + return ip; 79 + } 80 + 59 81 /* 60 82 * Entry point from instrumented code. 61 83 * This is called once per basic-block/edge. 
··· 93 55 void notrace __sanitizer_cov_trace_pc(void) 94 56 { 95 57 struct task_struct *t; 96 - enum kcov_mode mode; 58 + unsigned long *area; 59 + unsigned long ip = canonicalize_ip(_RET_IP_); 60 + unsigned long pos; 97 61 98 62 t = current; 99 - /* 100 - * We are interested in code coverage as a function of a syscall inputs, 101 - * so we ignore code executed in interrupts. 102 - */ 103 - if (!in_task()) 63 + if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t)) 104 64 return; 105 - mode = READ_ONCE(t->kcov_mode); 106 - if (mode == KCOV_MODE_TRACE) { 107 - unsigned long *area; 108 - unsigned long pos; 109 - unsigned long ip = _RET_IP_; 110 65 111 - #ifdef CONFIG_RANDOMIZE_BASE 112 - ip -= kaslr_offset(); 113 - #endif 114 - 115 - /* 116 - * There is some code that runs in interrupts but for which 117 - * in_interrupt() returns false (e.g. preempt_schedule_irq()). 118 - * READ_ONCE()/barrier() effectively provides load-acquire wrt 119 - * interrupts, there are paired barrier()/WRITE_ONCE() in 120 - * kcov_ioctl_locked(). 121 - */ 122 - barrier(); 123 - area = t->kcov_area; 124 - /* The first word is number of subsequent PCs. */ 125 - pos = READ_ONCE(area[0]) + 1; 126 - if (likely(pos < t->kcov_size)) { 127 - area[pos] = ip; 128 - WRITE_ONCE(area[0], pos); 129 - } 66 + area = t->kcov_area; 67 + /* The first 64-bit word is the number of subsequent PCs. */ 68 + pos = READ_ONCE(area[0]) + 1; 69 + if (likely(pos < t->kcov_size)) { 70 + area[pos] = ip; 71 + WRITE_ONCE(area[0], pos); 130 72 } 131 73 } 132 74 EXPORT_SYMBOL(__sanitizer_cov_trace_pc); 75 + 76 + #ifdef CONFIG_KCOV_ENABLE_COMPARISONS 77 + static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip) 78 + { 79 + struct task_struct *t; 80 + u64 *area; 81 + u64 count, start_index, end_pos, max_pos; 82 + 83 + t = current; 84 + if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t)) 85 + return; 86 + 87 + ip = canonicalize_ip(ip); 88 + 89 + /* 90 + * We write all comparison arguments and types as u64. 
91 + * The buffer was allocated for t->kcov_size unsigned longs. 92 + */ 93 + area = (u64 *)t->kcov_area; 94 + max_pos = t->kcov_size * sizeof(unsigned long); 95 + 96 + count = READ_ONCE(area[0]); 97 + 98 + /* Every record is KCOV_WORDS_PER_CMP 64-bit words. */ 99 + start_index = 1 + count * KCOV_WORDS_PER_CMP; 100 + end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64); 101 + if (likely(end_pos <= max_pos)) { 102 + area[start_index] = type; 103 + area[start_index + 1] = arg1; 104 + area[start_index + 2] = arg2; 105 + area[start_index + 3] = ip; 106 + WRITE_ONCE(area[0], count + 1); 107 + } 108 + } 109 + 110 + void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2) 111 + { 112 + write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_); 113 + } 114 + EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1); 115 + 116 + void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2) 117 + { 118 + write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_); 119 + } 120 + EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2); 121 + 122 + void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2) 123 + { 124 + write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_); 125 + } 126 + EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4); 127 + 128 + void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2) 129 + { 130 + write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_); 131 + } 132 + EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8); 133 + 134 + void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2) 135 + { 136 + write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2, 137 + _RET_IP_); 138 + } 139 + EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1); 140 + 141 + void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2) 142 + { 143 + write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2, 144 + _RET_IP_); 145 + } 146 + EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2); 147 + 148 + void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2) 149 + { 150 + 
write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, 151 + _RET_IP_); 152 + } 153 + EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4); 154 + 155 + void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2) 156 + { 157 + write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2, 158 + _RET_IP_); 159 + } 160 + EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8); 161 + 162 + void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases) 163 + { 164 + u64 i; 165 + u64 count = cases[0]; 166 + u64 size = cases[1]; 167 + u64 type = KCOV_CMP_CONST; 168 + 169 + switch (size) { 170 + case 8: 171 + type |= KCOV_CMP_SIZE(0); 172 + break; 173 + case 16: 174 + type |= KCOV_CMP_SIZE(1); 175 + break; 176 + case 32: 177 + type |= KCOV_CMP_SIZE(2); 178 + break; 179 + case 64: 180 + type |= KCOV_CMP_SIZE(3); 181 + break; 182 + default: 183 + return; 184 + } 185 + for (i = 0; i < count; i++) 186 + write_comp_data(type, cases[i + 2], val, _RET_IP_); 187 + } 188 + EXPORT_SYMBOL(__sanitizer_cov_trace_switch); 189 + #endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */ 133 190 134 191 static void kcov_get(struct kcov *kcov) 135 192 { ··· 262 129 /* Just to not leave dangling references behind. 
*/ 263 130 kcov_task_init(t); 264 131 kcov->t = NULL; 132 + kcov->mode = KCOV_MODE_INIT; 265 133 spin_unlock(&kcov->lock); 266 134 kcov_put(kcov); 267 135 } ··· 281 147 282 148 spin_lock(&kcov->lock); 283 149 size = kcov->size * sizeof(unsigned long); 284 - if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 || 150 + if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 || 285 151 vma->vm_end - vma->vm_start != size) { 286 152 res = -EINVAL; 287 153 goto exit; ··· 310 176 kcov = kzalloc(sizeof(*kcov), GFP_KERNEL); 311 177 if (!kcov) 312 178 return -ENOMEM; 179 + kcov->mode = KCOV_MODE_DISABLED; 313 180 atomic_set(&kcov->refcount, 1); 314 181 spin_lock_init(&kcov->lock); 315 182 filep->private_data = kcov; ··· 346 211 if (size < 2 || size > INT_MAX / sizeof(unsigned long)) 347 212 return -EINVAL; 348 213 kcov->size = size; 349 - kcov->mode = KCOV_MODE_TRACE; 214 + kcov->mode = KCOV_MODE_INIT; 350 215 return 0; 351 216 case KCOV_ENABLE: 352 217 /* ··· 356 221 * at task exit or voluntary by KCOV_DISABLE. After that it can 357 222 * be enabled for another task. 358 223 */ 359 - unused = arg; 360 - if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED || 361 - kcov->area == NULL) 224 + if (kcov->mode != KCOV_MODE_INIT || !kcov->area) 362 225 return -EINVAL; 363 226 if (kcov->t != NULL) 364 227 return -EBUSY; 228 + if (arg == KCOV_TRACE_PC) 229 + kcov->mode = KCOV_MODE_TRACE_PC; 230 + else if (arg == KCOV_TRACE_CMP) 231 + #ifdef CONFIG_KCOV_ENABLE_COMPARISONS 232 + kcov->mode = KCOV_MODE_TRACE_CMP; 233 + #else 234 + return -ENOTSUPP; 235 + #endif 236 + else 237 + return -EINVAL; 365 238 t = current; 366 239 /* Cache in task struct for performance. */ 367 240 t->kcov_size = kcov->size; 368 241 t->kcov_area = kcov->area; 369 - /* See comment in __sanitizer_cov_trace_pc(). */ 242 + /* See comment in check_kcov_mode(). 
*/ 370 243 barrier(); 371 244 WRITE_ONCE(t->kcov_mode, kcov->mode); 372 245 t->kcov = kcov; ··· 392 249 return -EINVAL; 393 250 kcov_task_init(t); 394 251 kcov->t = NULL; 252 + kcov->mode = KCOV_MODE_INIT; 395 253 kcov_put(kcov); 396 254 return 0; 397 255 default: