Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v6.1 2892 lines 73 kB view raw
1// SPDX-License-Identifier: GPL-2.0 2/* 3 * DAMON sysfs Interface 4 * 5 * Copyright (c) 2022 SeongJae Park <sj@kernel.org> 6 */ 7 8#include <linux/damon.h> 9#include <linux/kobject.h> 10#include <linux/pid.h> 11#include <linux/sched.h> 12#include <linux/slab.h> 13 14static DEFINE_MUTEX(damon_sysfs_lock); 15 16/* 17 * unsigned long range directory 18 */ 19 20struct damon_sysfs_ul_range { 21 struct kobject kobj; 22 unsigned long min; 23 unsigned long max; 24}; 25 26static struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc( 27 unsigned long min, 28 unsigned long max) 29{ 30 struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range), 31 GFP_KERNEL); 32 33 if (!range) 34 return NULL; 35 range->kobj = (struct kobject){}; 36 range->min = min; 37 range->max = max; 38 39 return range; 40} 41 42static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr, 43 char *buf) 44{ 45 struct damon_sysfs_ul_range *range = container_of(kobj, 46 struct damon_sysfs_ul_range, kobj); 47 48 return sysfs_emit(buf, "%lu\n", range->min); 49} 50 51static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr, 52 const char *buf, size_t count) 53{ 54 struct damon_sysfs_ul_range *range = container_of(kobj, 55 struct damon_sysfs_ul_range, kobj); 56 unsigned long min; 57 int err; 58 59 err = kstrtoul(buf, 0, &min); 60 if (err) 61 return err; 62 63 range->min = min; 64 return count; 65} 66 67static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr, 68 char *buf) 69{ 70 struct damon_sysfs_ul_range *range = container_of(kobj, 71 struct damon_sysfs_ul_range, kobj); 72 73 return sysfs_emit(buf, "%lu\n", range->max); 74} 75 76static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr, 77 const char *buf, size_t count) 78{ 79 struct damon_sysfs_ul_range *range = container_of(kobj, 80 struct damon_sysfs_ul_range, kobj); 81 unsigned long max; 82 int err; 83 84 err = kstrtoul(buf, 0, &max); 85 if (err) 86 return err; 87 88 range->max = max; 89 
return count; 90} 91 92static void damon_sysfs_ul_range_release(struct kobject *kobj) 93{ 94 kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj)); 95} 96 97static struct kobj_attribute damon_sysfs_ul_range_min_attr = 98 __ATTR_RW_MODE(min, 0600); 99 100static struct kobj_attribute damon_sysfs_ul_range_max_attr = 101 __ATTR_RW_MODE(max, 0600); 102 103static struct attribute *damon_sysfs_ul_range_attrs[] = { 104 &damon_sysfs_ul_range_min_attr.attr, 105 &damon_sysfs_ul_range_max_attr.attr, 106 NULL, 107}; 108ATTRIBUTE_GROUPS(damon_sysfs_ul_range); 109 110static struct kobj_type damon_sysfs_ul_range_ktype = { 111 .release = damon_sysfs_ul_range_release, 112 .sysfs_ops = &kobj_sysfs_ops, 113 .default_groups = damon_sysfs_ul_range_groups, 114}; 115 116/* 117 * schemes/stats directory 118 */ 119 120struct damon_sysfs_stats { 121 struct kobject kobj; 122 unsigned long nr_tried; 123 unsigned long sz_tried; 124 unsigned long nr_applied; 125 unsigned long sz_applied; 126 unsigned long qt_exceeds; 127}; 128 129static struct damon_sysfs_stats *damon_sysfs_stats_alloc(void) 130{ 131 return kzalloc(sizeof(struct damon_sysfs_stats), GFP_KERNEL); 132} 133 134static ssize_t nr_tried_show(struct kobject *kobj, struct kobj_attribute *attr, 135 char *buf) 136{ 137 struct damon_sysfs_stats *stats = container_of(kobj, 138 struct damon_sysfs_stats, kobj); 139 140 return sysfs_emit(buf, "%lu\n", stats->nr_tried); 141} 142 143static ssize_t sz_tried_show(struct kobject *kobj, struct kobj_attribute *attr, 144 char *buf) 145{ 146 struct damon_sysfs_stats *stats = container_of(kobj, 147 struct damon_sysfs_stats, kobj); 148 149 return sysfs_emit(buf, "%lu\n", stats->sz_tried); 150} 151 152static ssize_t nr_applied_show(struct kobject *kobj, 153 struct kobj_attribute *attr, char *buf) 154{ 155 struct damon_sysfs_stats *stats = container_of(kobj, 156 struct damon_sysfs_stats, kobj); 157 158 return sysfs_emit(buf, "%lu\n", stats->nr_applied); 159} 160 161static ssize_t 
sz_applied_show(struct kobject *kobj, 162 struct kobj_attribute *attr, char *buf) 163{ 164 struct damon_sysfs_stats *stats = container_of(kobj, 165 struct damon_sysfs_stats, kobj); 166 167 return sysfs_emit(buf, "%lu\n", stats->sz_applied); 168} 169 170static ssize_t qt_exceeds_show(struct kobject *kobj, 171 struct kobj_attribute *attr, char *buf) 172{ 173 struct damon_sysfs_stats *stats = container_of(kobj, 174 struct damon_sysfs_stats, kobj); 175 176 return sysfs_emit(buf, "%lu\n", stats->qt_exceeds); 177} 178 179static void damon_sysfs_stats_release(struct kobject *kobj) 180{ 181 kfree(container_of(kobj, struct damon_sysfs_stats, kobj)); 182} 183 184static struct kobj_attribute damon_sysfs_stats_nr_tried_attr = 185 __ATTR_RO_MODE(nr_tried, 0400); 186 187static struct kobj_attribute damon_sysfs_stats_sz_tried_attr = 188 __ATTR_RO_MODE(sz_tried, 0400); 189 190static struct kobj_attribute damon_sysfs_stats_nr_applied_attr = 191 __ATTR_RO_MODE(nr_applied, 0400); 192 193static struct kobj_attribute damon_sysfs_stats_sz_applied_attr = 194 __ATTR_RO_MODE(sz_applied, 0400); 195 196static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr = 197 __ATTR_RO_MODE(qt_exceeds, 0400); 198 199static struct attribute *damon_sysfs_stats_attrs[] = { 200 &damon_sysfs_stats_nr_tried_attr.attr, 201 &damon_sysfs_stats_sz_tried_attr.attr, 202 &damon_sysfs_stats_nr_applied_attr.attr, 203 &damon_sysfs_stats_sz_applied_attr.attr, 204 &damon_sysfs_stats_qt_exceeds_attr.attr, 205 NULL, 206}; 207ATTRIBUTE_GROUPS(damon_sysfs_stats); 208 209static struct kobj_type damon_sysfs_stats_ktype = { 210 .release = damon_sysfs_stats_release, 211 .sysfs_ops = &kobj_sysfs_ops, 212 .default_groups = damon_sysfs_stats_groups, 213}; 214 215/* 216 * watermarks directory 217 */ 218 219struct damon_sysfs_watermarks { 220 struct kobject kobj; 221 enum damos_wmark_metric metric; 222 unsigned long interval_us; 223 unsigned long high; 224 unsigned long mid; 225 unsigned long low; 226}; 227 228static struct 
damon_sysfs_watermarks *damon_sysfs_watermarks_alloc( 229 enum damos_wmark_metric metric, unsigned long interval_us, 230 unsigned long high, unsigned long mid, unsigned long low) 231{ 232 struct damon_sysfs_watermarks *watermarks = kmalloc( 233 sizeof(*watermarks), GFP_KERNEL); 234 235 if (!watermarks) 236 return NULL; 237 watermarks->kobj = (struct kobject){}; 238 watermarks->metric = metric; 239 watermarks->interval_us = interval_us; 240 watermarks->high = high; 241 watermarks->mid = mid; 242 watermarks->low = low; 243 return watermarks; 244} 245 246/* Should match with enum damos_wmark_metric */ 247static const char * const damon_sysfs_wmark_metric_strs[] = { 248 "none", 249 "free_mem_rate", 250}; 251 252static ssize_t metric_show(struct kobject *kobj, struct kobj_attribute *attr, 253 char *buf) 254{ 255 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 256 struct damon_sysfs_watermarks, kobj); 257 258 return sysfs_emit(buf, "%s\n", 259 damon_sysfs_wmark_metric_strs[watermarks->metric]); 260} 261 262static ssize_t metric_store(struct kobject *kobj, struct kobj_attribute *attr, 263 const char *buf, size_t count) 264{ 265 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 266 struct damon_sysfs_watermarks, kobj); 267 enum damos_wmark_metric metric; 268 269 for (metric = 0; metric < NR_DAMOS_WMARK_METRICS; metric++) { 270 if (sysfs_streq(buf, damon_sysfs_wmark_metric_strs[metric])) { 271 watermarks->metric = metric; 272 return count; 273 } 274 } 275 return -EINVAL; 276} 277 278static ssize_t interval_us_show(struct kobject *kobj, 279 struct kobj_attribute *attr, char *buf) 280{ 281 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 282 struct damon_sysfs_watermarks, kobj); 283 284 return sysfs_emit(buf, "%lu\n", watermarks->interval_us); 285} 286 287static ssize_t interval_us_store(struct kobject *kobj, 288 struct kobj_attribute *attr, const char *buf, size_t count) 289{ 290 struct damon_sysfs_watermarks *watermarks = 
container_of(kobj, 291 struct damon_sysfs_watermarks, kobj); 292 int err = kstrtoul(buf, 0, &watermarks->interval_us); 293 294 return err ? err : count; 295} 296 297static ssize_t high_show(struct kobject *kobj, 298 struct kobj_attribute *attr, char *buf) 299{ 300 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 301 struct damon_sysfs_watermarks, kobj); 302 303 return sysfs_emit(buf, "%lu\n", watermarks->high); 304} 305 306static ssize_t high_store(struct kobject *kobj, 307 struct kobj_attribute *attr, const char *buf, size_t count) 308{ 309 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 310 struct damon_sysfs_watermarks, kobj); 311 int err = kstrtoul(buf, 0, &watermarks->high); 312 313 return err ? err : count; 314} 315 316static ssize_t mid_show(struct kobject *kobj, 317 struct kobj_attribute *attr, char *buf) 318{ 319 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 320 struct damon_sysfs_watermarks, kobj); 321 322 return sysfs_emit(buf, "%lu\n", watermarks->mid); 323} 324 325static ssize_t mid_store(struct kobject *kobj, 326 struct kobj_attribute *attr, const char *buf, size_t count) 327{ 328 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 329 struct damon_sysfs_watermarks, kobj); 330 int err = kstrtoul(buf, 0, &watermarks->mid); 331 332 return err ? err : count; 333} 334 335static ssize_t low_show(struct kobject *kobj, 336 struct kobj_attribute *attr, char *buf) 337{ 338 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 339 struct damon_sysfs_watermarks, kobj); 340 341 return sysfs_emit(buf, "%lu\n", watermarks->low); 342} 343 344static ssize_t low_store(struct kobject *kobj, 345 struct kobj_attribute *attr, const char *buf, size_t count) 346{ 347 struct damon_sysfs_watermarks *watermarks = container_of(kobj, 348 struct damon_sysfs_watermarks, kobj); 349 int err = kstrtoul(buf, 0, &watermarks->low); 350 351 return err ? 
err : count; 352} 353 354static void damon_sysfs_watermarks_release(struct kobject *kobj) 355{ 356 kfree(container_of(kobj, struct damon_sysfs_watermarks, kobj)); 357} 358 359static struct kobj_attribute damon_sysfs_watermarks_metric_attr = 360 __ATTR_RW_MODE(metric, 0600); 361 362static struct kobj_attribute damon_sysfs_watermarks_interval_us_attr = 363 __ATTR_RW_MODE(interval_us, 0600); 364 365static struct kobj_attribute damon_sysfs_watermarks_high_attr = 366 __ATTR_RW_MODE(high, 0600); 367 368static struct kobj_attribute damon_sysfs_watermarks_mid_attr = 369 __ATTR_RW_MODE(mid, 0600); 370 371static struct kobj_attribute damon_sysfs_watermarks_low_attr = 372 __ATTR_RW_MODE(low, 0600); 373 374static struct attribute *damon_sysfs_watermarks_attrs[] = { 375 &damon_sysfs_watermarks_metric_attr.attr, 376 &damon_sysfs_watermarks_interval_us_attr.attr, 377 &damon_sysfs_watermarks_high_attr.attr, 378 &damon_sysfs_watermarks_mid_attr.attr, 379 &damon_sysfs_watermarks_low_attr.attr, 380 NULL, 381}; 382ATTRIBUTE_GROUPS(damon_sysfs_watermarks); 383 384static struct kobj_type damon_sysfs_watermarks_ktype = { 385 .release = damon_sysfs_watermarks_release, 386 .sysfs_ops = &kobj_sysfs_ops, 387 .default_groups = damon_sysfs_watermarks_groups, 388}; 389 390/* 391 * scheme/weights directory 392 */ 393 394struct damon_sysfs_weights { 395 struct kobject kobj; 396 unsigned int sz; 397 unsigned int nr_accesses; 398 unsigned int age; 399}; 400 401static struct damon_sysfs_weights *damon_sysfs_weights_alloc(unsigned int sz, 402 unsigned int nr_accesses, unsigned int age) 403{ 404 struct damon_sysfs_weights *weights = kmalloc(sizeof(*weights), 405 GFP_KERNEL); 406 407 if (!weights) 408 return NULL; 409 weights->kobj = (struct kobject){}; 410 weights->sz = sz; 411 weights->nr_accesses = nr_accesses; 412 weights->age = age; 413 return weights; 414} 415 416static ssize_t sz_permil_show(struct kobject *kobj, 417 struct kobj_attribute *attr, char *buf) 418{ 419 struct damon_sysfs_weights 
*weights = container_of(kobj, 420 struct damon_sysfs_weights, kobj); 421 422 return sysfs_emit(buf, "%u\n", weights->sz); 423} 424 425static ssize_t sz_permil_store(struct kobject *kobj, 426 struct kobj_attribute *attr, const char *buf, size_t count) 427{ 428 struct damon_sysfs_weights *weights = container_of(kobj, 429 struct damon_sysfs_weights, kobj); 430 int err = kstrtouint(buf, 0, &weights->sz); 431 432 return err ? err : count; 433} 434 435static ssize_t nr_accesses_permil_show(struct kobject *kobj, 436 struct kobj_attribute *attr, char *buf) 437{ 438 struct damon_sysfs_weights *weights = container_of(kobj, 439 struct damon_sysfs_weights, kobj); 440 441 return sysfs_emit(buf, "%u\n", weights->nr_accesses); 442} 443 444static ssize_t nr_accesses_permil_store(struct kobject *kobj, 445 struct kobj_attribute *attr, const char *buf, size_t count) 446{ 447 struct damon_sysfs_weights *weights = container_of(kobj, 448 struct damon_sysfs_weights, kobj); 449 int err = kstrtouint(buf, 0, &weights->nr_accesses); 450 451 return err ? err : count; 452} 453 454static ssize_t age_permil_show(struct kobject *kobj, 455 struct kobj_attribute *attr, char *buf) 456{ 457 struct damon_sysfs_weights *weights = container_of(kobj, 458 struct damon_sysfs_weights, kobj); 459 460 return sysfs_emit(buf, "%u\n", weights->age); 461} 462 463static ssize_t age_permil_store(struct kobject *kobj, 464 struct kobj_attribute *attr, const char *buf, size_t count) 465{ 466 struct damon_sysfs_weights *weights = container_of(kobj, 467 struct damon_sysfs_weights, kobj); 468 int err = kstrtouint(buf, 0, &weights->age); 469 470 return err ? 
err : count; 471} 472 473static void damon_sysfs_weights_release(struct kobject *kobj) 474{ 475 kfree(container_of(kobj, struct damon_sysfs_weights, kobj)); 476} 477 478static struct kobj_attribute damon_sysfs_weights_sz_attr = 479 __ATTR_RW_MODE(sz_permil, 0600); 480 481static struct kobj_attribute damon_sysfs_weights_nr_accesses_attr = 482 __ATTR_RW_MODE(nr_accesses_permil, 0600); 483 484static struct kobj_attribute damon_sysfs_weights_age_attr = 485 __ATTR_RW_MODE(age_permil, 0600); 486 487static struct attribute *damon_sysfs_weights_attrs[] = { 488 &damon_sysfs_weights_sz_attr.attr, 489 &damon_sysfs_weights_nr_accesses_attr.attr, 490 &damon_sysfs_weights_age_attr.attr, 491 NULL, 492}; 493ATTRIBUTE_GROUPS(damon_sysfs_weights); 494 495static struct kobj_type damon_sysfs_weights_ktype = { 496 .release = damon_sysfs_weights_release, 497 .sysfs_ops = &kobj_sysfs_ops, 498 .default_groups = damon_sysfs_weights_groups, 499}; 500 501/* 502 * quotas directory 503 */ 504 505struct damon_sysfs_quotas { 506 struct kobject kobj; 507 struct damon_sysfs_weights *weights; 508 unsigned long ms; 509 unsigned long sz; 510 unsigned long reset_interval_ms; 511}; 512 513static struct damon_sysfs_quotas *damon_sysfs_quotas_alloc(void) 514{ 515 return kzalloc(sizeof(struct damon_sysfs_quotas), GFP_KERNEL); 516} 517 518static int damon_sysfs_quotas_add_dirs(struct damon_sysfs_quotas *quotas) 519{ 520 struct damon_sysfs_weights *weights; 521 int err; 522 523 weights = damon_sysfs_weights_alloc(0, 0, 0); 524 if (!weights) 525 return -ENOMEM; 526 527 err = kobject_init_and_add(&weights->kobj, &damon_sysfs_weights_ktype, 528 &quotas->kobj, "weights"); 529 if (err) 530 kobject_put(&weights->kobj); 531 else 532 quotas->weights = weights; 533 return err; 534} 535 536static void damon_sysfs_quotas_rm_dirs(struct damon_sysfs_quotas *quotas) 537{ 538 kobject_put(&quotas->weights->kobj); 539} 540 541static ssize_t ms_show(struct kobject *kobj, struct kobj_attribute *attr, 542 char *buf) 543{ 544 
struct damon_sysfs_quotas *quotas = container_of(kobj, 545 struct damon_sysfs_quotas, kobj); 546 547 return sysfs_emit(buf, "%lu\n", quotas->ms); 548} 549 550static ssize_t ms_store(struct kobject *kobj, struct kobj_attribute *attr, 551 const char *buf, size_t count) 552{ 553 struct damon_sysfs_quotas *quotas = container_of(kobj, 554 struct damon_sysfs_quotas, kobj); 555 int err = kstrtoul(buf, 0, &quotas->ms); 556 557 if (err) 558 return -EINVAL; 559 return count; 560} 561 562static ssize_t bytes_show(struct kobject *kobj, struct kobj_attribute *attr, 563 char *buf) 564{ 565 struct damon_sysfs_quotas *quotas = container_of(kobj, 566 struct damon_sysfs_quotas, kobj); 567 568 return sysfs_emit(buf, "%lu\n", quotas->sz); 569} 570 571static ssize_t bytes_store(struct kobject *kobj, 572 struct kobj_attribute *attr, const char *buf, size_t count) 573{ 574 struct damon_sysfs_quotas *quotas = container_of(kobj, 575 struct damon_sysfs_quotas, kobj); 576 int err = kstrtoul(buf, 0, &quotas->sz); 577 578 if (err) 579 return -EINVAL; 580 return count; 581} 582 583static ssize_t reset_interval_ms_show(struct kobject *kobj, 584 struct kobj_attribute *attr, char *buf) 585{ 586 struct damon_sysfs_quotas *quotas = container_of(kobj, 587 struct damon_sysfs_quotas, kobj); 588 589 return sysfs_emit(buf, "%lu\n", quotas->reset_interval_ms); 590} 591 592static ssize_t reset_interval_ms_store(struct kobject *kobj, 593 struct kobj_attribute *attr, const char *buf, size_t count) 594{ 595 struct damon_sysfs_quotas *quotas = container_of(kobj, 596 struct damon_sysfs_quotas, kobj); 597 int err = kstrtoul(buf, 0, &quotas->reset_interval_ms); 598 599 if (err) 600 return -EINVAL; 601 return count; 602} 603 604static void damon_sysfs_quotas_release(struct kobject *kobj) 605{ 606 kfree(container_of(kobj, struct damon_sysfs_quotas, kobj)); 607} 608 609static struct kobj_attribute damon_sysfs_quotas_ms_attr = 610 __ATTR_RW_MODE(ms, 0600); 611 612static struct kobj_attribute 
damon_sysfs_quotas_sz_attr = 613 __ATTR_RW_MODE(bytes, 0600); 614 615static struct kobj_attribute damon_sysfs_quotas_reset_interval_ms_attr = 616 __ATTR_RW_MODE(reset_interval_ms, 0600); 617 618static struct attribute *damon_sysfs_quotas_attrs[] = { 619 &damon_sysfs_quotas_ms_attr.attr, 620 &damon_sysfs_quotas_sz_attr.attr, 621 &damon_sysfs_quotas_reset_interval_ms_attr.attr, 622 NULL, 623}; 624ATTRIBUTE_GROUPS(damon_sysfs_quotas); 625 626static struct kobj_type damon_sysfs_quotas_ktype = { 627 .release = damon_sysfs_quotas_release, 628 .sysfs_ops = &kobj_sysfs_ops, 629 .default_groups = damon_sysfs_quotas_groups, 630}; 631 632/* 633 * access_pattern directory 634 */ 635 636struct damon_sysfs_access_pattern { 637 struct kobject kobj; 638 struct damon_sysfs_ul_range *sz; 639 struct damon_sysfs_ul_range *nr_accesses; 640 struct damon_sysfs_ul_range *age; 641}; 642 643static 644struct damon_sysfs_access_pattern *damon_sysfs_access_pattern_alloc(void) 645{ 646 struct damon_sysfs_access_pattern *access_pattern = 647 kmalloc(sizeof(*access_pattern), GFP_KERNEL); 648 649 if (!access_pattern) 650 return NULL; 651 access_pattern->kobj = (struct kobject){}; 652 return access_pattern; 653} 654 655static int damon_sysfs_access_pattern_add_range_dir( 656 struct damon_sysfs_access_pattern *access_pattern, 657 struct damon_sysfs_ul_range **range_dir_ptr, 658 char *name) 659{ 660 struct damon_sysfs_ul_range *range = damon_sysfs_ul_range_alloc(0, 0); 661 int err; 662 663 if (!range) 664 return -ENOMEM; 665 err = kobject_init_and_add(&range->kobj, &damon_sysfs_ul_range_ktype, 666 &access_pattern->kobj, name); 667 if (err) 668 kobject_put(&range->kobj); 669 else 670 *range_dir_ptr = range; 671 return err; 672} 673 674static int damon_sysfs_access_pattern_add_dirs( 675 struct damon_sysfs_access_pattern *access_pattern) 676{ 677 int err; 678 679 err = damon_sysfs_access_pattern_add_range_dir(access_pattern, 680 &access_pattern->sz, "sz"); 681 if (err) 682 goto put_sz_out; 683 684 err = 
damon_sysfs_access_pattern_add_range_dir(access_pattern, 685 &access_pattern->nr_accesses, "nr_accesses"); 686 if (err) 687 goto put_nr_accesses_sz_out; 688 689 err = damon_sysfs_access_pattern_add_range_dir(access_pattern, 690 &access_pattern->age, "age"); 691 if (err) 692 goto put_age_nr_accesses_sz_out; 693 return 0; 694 695put_age_nr_accesses_sz_out: 696 kobject_put(&access_pattern->age->kobj); 697 access_pattern->age = NULL; 698put_nr_accesses_sz_out: 699 kobject_put(&access_pattern->nr_accesses->kobj); 700 access_pattern->nr_accesses = NULL; 701put_sz_out: 702 kobject_put(&access_pattern->sz->kobj); 703 access_pattern->sz = NULL; 704 return err; 705} 706 707static void damon_sysfs_access_pattern_rm_dirs( 708 struct damon_sysfs_access_pattern *access_pattern) 709{ 710 kobject_put(&access_pattern->sz->kobj); 711 kobject_put(&access_pattern->nr_accesses->kobj); 712 kobject_put(&access_pattern->age->kobj); 713} 714 715static void damon_sysfs_access_pattern_release(struct kobject *kobj) 716{ 717 kfree(container_of(kobj, struct damon_sysfs_access_pattern, kobj)); 718} 719 720static struct attribute *damon_sysfs_access_pattern_attrs[] = { 721 NULL, 722}; 723ATTRIBUTE_GROUPS(damon_sysfs_access_pattern); 724 725static struct kobj_type damon_sysfs_access_pattern_ktype = { 726 .release = damon_sysfs_access_pattern_release, 727 .sysfs_ops = &kobj_sysfs_ops, 728 .default_groups = damon_sysfs_access_pattern_groups, 729}; 730 731/* 732 * scheme directory 733 */ 734 735struct damon_sysfs_scheme { 736 struct kobject kobj; 737 enum damos_action action; 738 struct damon_sysfs_access_pattern *access_pattern; 739 struct damon_sysfs_quotas *quotas; 740 struct damon_sysfs_watermarks *watermarks; 741 struct damon_sysfs_stats *stats; 742}; 743 744/* This should match with enum damos_action */ 745static const char * const damon_sysfs_damos_action_strs[] = { 746 "willneed", 747 "cold", 748 "pageout", 749 "hugepage", 750 "nohugepage", 751 "lru_prio", 752 "lru_deprio", 753 "stat", 754}; 
755 756static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc( 757 enum damos_action action) 758{ 759 struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme), 760 GFP_KERNEL); 761 762 if (!scheme) 763 return NULL; 764 scheme->kobj = (struct kobject){}; 765 scheme->action = action; 766 return scheme; 767} 768 769static int damon_sysfs_scheme_set_access_pattern( 770 struct damon_sysfs_scheme *scheme) 771{ 772 struct damon_sysfs_access_pattern *access_pattern; 773 int err; 774 775 access_pattern = damon_sysfs_access_pattern_alloc(); 776 if (!access_pattern) 777 return -ENOMEM; 778 err = kobject_init_and_add(&access_pattern->kobj, 779 &damon_sysfs_access_pattern_ktype, &scheme->kobj, 780 "access_pattern"); 781 if (err) 782 goto out; 783 err = damon_sysfs_access_pattern_add_dirs(access_pattern); 784 if (err) 785 goto out; 786 scheme->access_pattern = access_pattern; 787 return 0; 788 789out: 790 kobject_put(&access_pattern->kobj); 791 return err; 792} 793 794static int damon_sysfs_scheme_set_quotas(struct damon_sysfs_scheme *scheme) 795{ 796 struct damon_sysfs_quotas *quotas = damon_sysfs_quotas_alloc(); 797 int err; 798 799 if (!quotas) 800 return -ENOMEM; 801 err = kobject_init_and_add(&quotas->kobj, &damon_sysfs_quotas_ktype, 802 &scheme->kobj, "quotas"); 803 if (err) 804 goto out; 805 err = damon_sysfs_quotas_add_dirs(quotas); 806 if (err) 807 goto out; 808 scheme->quotas = quotas; 809 return 0; 810 811out: 812 kobject_put(&quotas->kobj); 813 return err; 814} 815 816static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme) 817{ 818 struct damon_sysfs_watermarks *watermarks = 819 damon_sysfs_watermarks_alloc(DAMOS_WMARK_NONE, 0, 0, 0, 0); 820 int err; 821 822 if (!watermarks) 823 return -ENOMEM; 824 err = kobject_init_and_add(&watermarks->kobj, 825 &damon_sysfs_watermarks_ktype, &scheme->kobj, 826 "watermarks"); 827 if (err) 828 kobject_put(&watermarks->kobj); 829 else 830 scheme->watermarks = watermarks; 831 return err; 832} 833 
834static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme) 835{ 836 struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc(); 837 int err; 838 839 if (!stats) 840 return -ENOMEM; 841 err = kobject_init_and_add(&stats->kobj, &damon_sysfs_stats_ktype, 842 &scheme->kobj, "stats"); 843 if (err) 844 kobject_put(&stats->kobj); 845 else 846 scheme->stats = stats; 847 return err; 848} 849 850static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme) 851{ 852 int err; 853 854 err = damon_sysfs_scheme_set_access_pattern(scheme); 855 if (err) 856 return err; 857 err = damon_sysfs_scheme_set_quotas(scheme); 858 if (err) 859 goto put_access_pattern_out; 860 err = damon_sysfs_scheme_set_watermarks(scheme); 861 if (err) 862 goto put_quotas_access_pattern_out; 863 err = damon_sysfs_scheme_set_stats(scheme); 864 if (err) 865 goto put_watermarks_quotas_access_pattern_out; 866 return 0; 867 868put_watermarks_quotas_access_pattern_out: 869 kobject_put(&scheme->watermarks->kobj); 870 scheme->watermarks = NULL; 871put_quotas_access_pattern_out: 872 kobject_put(&scheme->quotas->kobj); 873 scheme->quotas = NULL; 874put_access_pattern_out: 875 kobject_put(&scheme->access_pattern->kobj); 876 scheme->access_pattern = NULL; 877 return err; 878} 879 880static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme) 881{ 882 damon_sysfs_access_pattern_rm_dirs(scheme->access_pattern); 883 kobject_put(&scheme->access_pattern->kobj); 884 damon_sysfs_quotas_rm_dirs(scheme->quotas); 885 kobject_put(&scheme->quotas->kobj); 886 kobject_put(&scheme->watermarks->kobj); 887 kobject_put(&scheme->stats->kobj); 888} 889 890static ssize_t action_show(struct kobject *kobj, struct kobj_attribute *attr, 891 char *buf) 892{ 893 struct damon_sysfs_scheme *scheme = container_of(kobj, 894 struct damon_sysfs_scheme, kobj); 895 896 return sysfs_emit(buf, "%s\n", 897 damon_sysfs_damos_action_strs[scheme->action]); 898} 899 900static ssize_t action_store(struct kobject 
*kobj, struct kobj_attribute *attr, 901 const char *buf, size_t count) 902{ 903 struct damon_sysfs_scheme *scheme = container_of(kobj, 904 struct damon_sysfs_scheme, kobj); 905 enum damos_action action; 906 907 for (action = 0; action < NR_DAMOS_ACTIONS; action++) { 908 if (sysfs_streq(buf, damon_sysfs_damos_action_strs[action])) { 909 scheme->action = action; 910 return count; 911 } 912 } 913 return -EINVAL; 914} 915 916static void damon_sysfs_scheme_release(struct kobject *kobj) 917{ 918 kfree(container_of(kobj, struct damon_sysfs_scheme, kobj)); 919} 920 921static struct kobj_attribute damon_sysfs_scheme_action_attr = 922 __ATTR_RW_MODE(action, 0600); 923 924static struct attribute *damon_sysfs_scheme_attrs[] = { 925 &damon_sysfs_scheme_action_attr.attr, 926 NULL, 927}; 928ATTRIBUTE_GROUPS(damon_sysfs_scheme); 929 930static struct kobj_type damon_sysfs_scheme_ktype = { 931 .release = damon_sysfs_scheme_release, 932 .sysfs_ops = &kobj_sysfs_ops, 933 .default_groups = damon_sysfs_scheme_groups, 934}; 935 936/* 937 * schemes directory 938 */ 939 940struct damon_sysfs_schemes { 941 struct kobject kobj; 942 struct damon_sysfs_scheme **schemes_arr; 943 int nr; 944}; 945 946static struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void) 947{ 948 return kzalloc(sizeof(struct damon_sysfs_schemes), GFP_KERNEL); 949} 950 951static void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes) 952{ 953 struct damon_sysfs_scheme **schemes_arr = schemes->schemes_arr; 954 int i; 955 956 for (i = 0; i < schemes->nr; i++) { 957 damon_sysfs_scheme_rm_dirs(schemes_arr[i]); 958 kobject_put(&schemes_arr[i]->kobj); 959 } 960 schemes->nr = 0; 961 kfree(schemes_arr); 962 schemes->schemes_arr = NULL; 963} 964 965static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes, 966 int nr_schemes) 967{ 968 struct damon_sysfs_scheme **schemes_arr, *scheme; 969 int err, i; 970 971 damon_sysfs_schemes_rm_dirs(schemes); 972 if (!nr_schemes) 973 return 0; 974 975 
schemes_arr = kmalloc_array(nr_schemes, sizeof(*schemes_arr), 976 GFP_KERNEL | __GFP_NOWARN); 977 if (!schemes_arr) 978 return -ENOMEM; 979 schemes->schemes_arr = schemes_arr; 980 981 for (i = 0; i < nr_schemes; i++) { 982 scheme = damon_sysfs_scheme_alloc(DAMOS_STAT); 983 if (!scheme) { 984 damon_sysfs_schemes_rm_dirs(schemes); 985 return -ENOMEM; 986 } 987 988 err = kobject_init_and_add(&scheme->kobj, 989 &damon_sysfs_scheme_ktype, &schemes->kobj, 990 "%d", i); 991 if (err) 992 goto out; 993 err = damon_sysfs_scheme_add_dirs(scheme); 994 if (err) 995 goto out; 996 997 schemes_arr[i] = scheme; 998 schemes->nr++; 999 } 1000 return 0; 1001 1002out: 1003 damon_sysfs_schemes_rm_dirs(schemes); 1004 kobject_put(&scheme->kobj); 1005 return err; 1006} 1007 1008static ssize_t nr_schemes_show(struct kobject *kobj, 1009 struct kobj_attribute *attr, char *buf) 1010{ 1011 struct damon_sysfs_schemes *schemes = container_of(kobj, 1012 struct damon_sysfs_schemes, kobj); 1013 1014 return sysfs_emit(buf, "%d\n", schemes->nr); 1015} 1016 1017static ssize_t nr_schemes_store(struct kobject *kobj, 1018 struct kobj_attribute *attr, const char *buf, size_t count) 1019{ 1020 struct damon_sysfs_schemes *schemes; 1021 int nr, err = kstrtoint(buf, 0, &nr); 1022 1023 if (err) 1024 return err; 1025 if (nr < 0) 1026 return -EINVAL; 1027 1028 schemes = container_of(kobj, struct damon_sysfs_schemes, kobj); 1029 1030 if (!mutex_trylock(&damon_sysfs_lock)) 1031 return -EBUSY; 1032 err = damon_sysfs_schemes_add_dirs(schemes, nr); 1033 mutex_unlock(&damon_sysfs_lock); 1034 if (err) 1035 return err; 1036 return count; 1037} 1038 1039static void damon_sysfs_schemes_release(struct kobject *kobj) 1040{ 1041 kfree(container_of(kobj, struct damon_sysfs_schemes, kobj)); 1042} 1043 1044static struct kobj_attribute damon_sysfs_schemes_nr_attr = 1045 __ATTR_RW_MODE(nr_schemes, 0600); 1046 1047static struct attribute *damon_sysfs_schemes_attrs[] = { 1048 &damon_sysfs_schemes_nr_attr.attr, 1049 NULL, 1050}; 
1051ATTRIBUTE_GROUPS(damon_sysfs_schemes); 1052 1053static struct kobj_type damon_sysfs_schemes_ktype = { 1054 .release = damon_sysfs_schemes_release, 1055 .sysfs_ops = &kobj_sysfs_ops, 1056 .default_groups = damon_sysfs_schemes_groups, 1057}; 1058 1059/* 1060 * init region directory 1061 */ 1062 1063struct damon_sysfs_region { 1064 struct kobject kobj; 1065 unsigned long start; 1066 unsigned long end; 1067}; 1068 1069static struct damon_sysfs_region *damon_sysfs_region_alloc( 1070 unsigned long start, 1071 unsigned long end) 1072{ 1073 struct damon_sysfs_region *region = kmalloc(sizeof(*region), 1074 GFP_KERNEL); 1075 1076 if (!region) 1077 return NULL; 1078 region->kobj = (struct kobject){}; 1079 region->start = start; 1080 region->end = end; 1081 return region; 1082} 1083 1084static ssize_t start_show(struct kobject *kobj, struct kobj_attribute *attr, 1085 char *buf) 1086{ 1087 struct damon_sysfs_region *region = container_of(kobj, 1088 struct damon_sysfs_region, kobj); 1089 1090 return sysfs_emit(buf, "%lu\n", region->start); 1091} 1092 1093static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr, 1094 const char *buf, size_t count) 1095{ 1096 struct damon_sysfs_region *region = container_of(kobj, 1097 struct damon_sysfs_region, kobj); 1098 int err = kstrtoul(buf, 0, &region->start); 1099 1100 return err ? err : count; 1101} 1102 1103static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr, 1104 char *buf) 1105{ 1106 struct damon_sysfs_region *region = container_of(kobj, 1107 struct damon_sysfs_region, kobj); 1108 1109 return sysfs_emit(buf, "%lu\n", region->end); 1110} 1111 1112static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr, 1113 const char *buf, size_t count) 1114{ 1115 struct damon_sysfs_region *region = container_of(kobj, 1116 struct damon_sysfs_region, kobj); 1117 int err = kstrtoul(buf, 0, &region->end); 1118 1119 return err ? 
err : count; 1120} 1121 1122static void damon_sysfs_region_release(struct kobject *kobj) 1123{ 1124 kfree(container_of(kobj, struct damon_sysfs_region, kobj)); 1125} 1126 1127static struct kobj_attribute damon_sysfs_region_start_attr = 1128 __ATTR_RW_MODE(start, 0600); 1129 1130static struct kobj_attribute damon_sysfs_region_end_attr = 1131 __ATTR_RW_MODE(end, 0600); 1132 1133static struct attribute *damon_sysfs_region_attrs[] = { 1134 &damon_sysfs_region_start_attr.attr, 1135 &damon_sysfs_region_end_attr.attr, 1136 NULL, 1137}; 1138ATTRIBUTE_GROUPS(damon_sysfs_region); 1139 1140static struct kobj_type damon_sysfs_region_ktype = { 1141 .release = damon_sysfs_region_release, 1142 .sysfs_ops = &kobj_sysfs_ops, 1143 .default_groups = damon_sysfs_region_groups, 1144}; 1145 1146/* 1147 * init_regions directory 1148 */ 1149 1150struct damon_sysfs_regions { 1151 struct kobject kobj; 1152 struct damon_sysfs_region **regions_arr; 1153 int nr; 1154}; 1155 1156static struct damon_sysfs_regions *damon_sysfs_regions_alloc(void) 1157{ 1158 return kzalloc(sizeof(struct damon_sysfs_regions), GFP_KERNEL); 1159} 1160 1161static void damon_sysfs_regions_rm_dirs(struct damon_sysfs_regions *regions) 1162{ 1163 struct damon_sysfs_region **regions_arr = regions->regions_arr; 1164 int i; 1165 1166 for (i = 0; i < regions->nr; i++) 1167 kobject_put(&regions_arr[i]->kobj); 1168 regions->nr = 0; 1169 kfree(regions_arr); 1170 regions->regions_arr = NULL; 1171} 1172 1173static int damon_sysfs_regions_add_dirs(struct damon_sysfs_regions *regions, 1174 int nr_regions) 1175{ 1176 struct damon_sysfs_region **regions_arr, *region; 1177 int err, i; 1178 1179 damon_sysfs_regions_rm_dirs(regions); 1180 if (!nr_regions) 1181 return 0; 1182 1183 regions_arr = kmalloc_array(nr_regions, sizeof(*regions_arr), 1184 GFP_KERNEL | __GFP_NOWARN); 1185 if (!regions_arr) 1186 return -ENOMEM; 1187 regions->regions_arr = regions_arr; 1188 1189 for (i = 0; i < nr_regions; i++) { 1190 region = 
damon_sysfs_region_alloc(0, 0); 1191 if (!region) { 1192 damon_sysfs_regions_rm_dirs(regions); 1193 return -ENOMEM; 1194 } 1195 1196 err = kobject_init_and_add(&region->kobj, 1197 &damon_sysfs_region_ktype, &regions->kobj, 1198 "%d", i); 1199 if (err) { 1200 kobject_put(&region->kobj); 1201 damon_sysfs_regions_rm_dirs(regions); 1202 return err; 1203 } 1204 1205 regions_arr[i] = region; 1206 regions->nr++; 1207 } 1208 return 0; 1209} 1210 1211static ssize_t nr_regions_show(struct kobject *kobj, 1212 struct kobj_attribute *attr, char *buf) 1213{ 1214 struct damon_sysfs_regions *regions = container_of(kobj, 1215 struct damon_sysfs_regions, kobj); 1216 1217 return sysfs_emit(buf, "%d\n", regions->nr); 1218} 1219 1220static ssize_t nr_regions_store(struct kobject *kobj, 1221 struct kobj_attribute *attr, const char *buf, size_t count) 1222{ 1223 struct damon_sysfs_regions *regions; 1224 int nr, err = kstrtoint(buf, 0, &nr); 1225 1226 if (err) 1227 return err; 1228 if (nr < 0) 1229 return -EINVAL; 1230 1231 regions = container_of(kobj, struct damon_sysfs_regions, kobj); 1232 1233 if (!mutex_trylock(&damon_sysfs_lock)) 1234 return -EBUSY; 1235 err = damon_sysfs_regions_add_dirs(regions, nr); 1236 mutex_unlock(&damon_sysfs_lock); 1237 if (err) 1238 return err; 1239 1240 return count; 1241} 1242 1243static void damon_sysfs_regions_release(struct kobject *kobj) 1244{ 1245 kfree(container_of(kobj, struct damon_sysfs_regions, kobj)); 1246} 1247 1248static struct kobj_attribute damon_sysfs_regions_nr_attr = 1249 __ATTR_RW_MODE(nr_regions, 0600); 1250 1251static struct attribute *damon_sysfs_regions_attrs[] = { 1252 &damon_sysfs_regions_nr_attr.attr, 1253 NULL, 1254}; 1255ATTRIBUTE_GROUPS(damon_sysfs_regions); 1256 1257static struct kobj_type damon_sysfs_regions_ktype = { 1258 .release = damon_sysfs_regions_release, 1259 .sysfs_ops = &kobj_sysfs_ops, 1260 .default_groups = damon_sysfs_regions_groups, 1261}; 1262 1263/* 1264 * target directory 1265 */ 1266 1267struct 
damon_sysfs_target { 1268 struct kobject kobj; 1269 struct damon_sysfs_regions *regions; 1270 int pid; 1271}; 1272 1273static struct damon_sysfs_target *damon_sysfs_target_alloc(void) 1274{ 1275 return kzalloc(sizeof(struct damon_sysfs_target), GFP_KERNEL); 1276} 1277 1278static int damon_sysfs_target_add_dirs(struct damon_sysfs_target *target) 1279{ 1280 struct damon_sysfs_regions *regions = damon_sysfs_regions_alloc(); 1281 int err; 1282 1283 if (!regions) 1284 return -ENOMEM; 1285 1286 err = kobject_init_and_add(&regions->kobj, &damon_sysfs_regions_ktype, 1287 &target->kobj, "regions"); 1288 if (err) 1289 kobject_put(&regions->kobj); 1290 else 1291 target->regions = regions; 1292 return err; 1293} 1294 1295static void damon_sysfs_target_rm_dirs(struct damon_sysfs_target *target) 1296{ 1297 damon_sysfs_regions_rm_dirs(target->regions); 1298 kobject_put(&target->regions->kobj); 1299} 1300 1301static ssize_t pid_target_show(struct kobject *kobj, 1302 struct kobj_attribute *attr, char *buf) 1303{ 1304 struct damon_sysfs_target *target = container_of(kobj, 1305 struct damon_sysfs_target, kobj); 1306 1307 return sysfs_emit(buf, "%d\n", target->pid); 1308} 1309 1310static ssize_t pid_target_store(struct kobject *kobj, 1311 struct kobj_attribute *attr, const char *buf, size_t count) 1312{ 1313 struct damon_sysfs_target *target = container_of(kobj, 1314 struct damon_sysfs_target, kobj); 1315 int err = kstrtoint(buf, 0, &target->pid); 1316 1317 if (err) 1318 return -EINVAL; 1319 return count; 1320} 1321 1322static void damon_sysfs_target_release(struct kobject *kobj) 1323{ 1324 kfree(container_of(kobj, struct damon_sysfs_target, kobj)); 1325} 1326 1327static struct kobj_attribute damon_sysfs_target_pid_attr = 1328 __ATTR_RW_MODE(pid_target, 0600); 1329 1330static struct attribute *damon_sysfs_target_attrs[] = { 1331 &damon_sysfs_target_pid_attr.attr, 1332 NULL, 1333}; 1334ATTRIBUTE_GROUPS(damon_sysfs_target); 1335 1336static struct kobj_type damon_sysfs_target_ktype = { 
1337 .release = damon_sysfs_target_release, 1338 .sysfs_ops = &kobj_sysfs_ops, 1339 .default_groups = damon_sysfs_target_groups, 1340}; 1341 1342/* 1343 * targets directory 1344 */ 1345 1346struct damon_sysfs_targets { 1347 struct kobject kobj; 1348 struct damon_sysfs_target **targets_arr; 1349 int nr; 1350}; 1351 1352static struct damon_sysfs_targets *damon_sysfs_targets_alloc(void) 1353{ 1354 return kzalloc(sizeof(struct damon_sysfs_targets), GFP_KERNEL); 1355} 1356 1357static void damon_sysfs_targets_rm_dirs(struct damon_sysfs_targets *targets) 1358{ 1359 struct damon_sysfs_target **targets_arr = targets->targets_arr; 1360 int i; 1361 1362 for (i = 0; i < targets->nr; i++) { 1363 damon_sysfs_target_rm_dirs(targets_arr[i]); 1364 kobject_put(&targets_arr[i]->kobj); 1365 } 1366 targets->nr = 0; 1367 kfree(targets_arr); 1368 targets->targets_arr = NULL; 1369} 1370 1371static int damon_sysfs_targets_add_dirs(struct damon_sysfs_targets *targets, 1372 int nr_targets) 1373{ 1374 struct damon_sysfs_target **targets_arr, *target; 1375 int err, i; 1376 1377 damon_sysfs_targets_rm_dirs(targets); 1378 if (!nr_targets) 1379 return 0; 1380 1381 targets_arr = kmalloc_array(nr_targets, sizeof(*targets_arr), 1382 GFP_KERNEL | __GFP_NOWARN); 1383 if (!targets_arr) 1384 return -ENOMEM; 1385 targets->targets_arr = targets_arr; 1386 1387 for (i = 0; i < nr_targets; i++) { 1388 target = damon_sysfs_target_alloc(); 1389 if (!target) { 1390 damon_sysfs_targets_rm_dirs(targets); 1391 return -ENOMEM; 1392 } 1393 1394 err = kobject_init_and_add(&target->kobj, 1395 &damon_sysfs_target_ktype, &targets->kobj, 1396 "%d", i); 1397 if (err) 1398 goto out; 1399 1400 err = damon_sysfs_target_add_dirs(target); 1401 if (err) 1402 goto out; 1403 1404 targets_arr[i] = target; 1405 targets->nr++; 1406 } 1407 return 0; 1408 1409out: 1410 damon_sysfs_targets_rm_dirs(targets); 1411 kobject_put(&target->kobj); 1412 return err; 1413} 1414 1415static ssize_t nr_targets_show(struct kobject *kobj, 1416 struct 
kobj_attribute *attr, char *buf) 1417{ 1418 struct damon_sysfs_targets *targets = container_of(kobj, 1419 struct damon_sysfs_targets, kobj); 1420 1421 return sysfs_emit(buf, "%d\n", targets->nr); 1422} 1423 1424static ssize_t nr_targets_store(struct kobject *kobj, 1425 struct kobj_attribute *attr, const char *buf, size_t count) 1426{ 1427 struct damon_sysfs_targets *targets; 1428 int nr, err = kstrtoint(buf, 0, &nr); 1429 1430 if (err) 1431 return err; 1432 if (nr < 0) 1433 return -EINVAL; 1434 1435 targets = container_of(kobj, struct damon_sysfs_targets, kobj); 1436 1437 if (!mutex_trylock(&damon_sysfs_lock)) 1438 return -EBUSY; 1439 err = damon_sysfs_targets_add_dirs(targets, nr); 1440 mutex_unlock(&damon_sysfs_lock); 1441 if (err) 1442 return err; 1443 1444 return count; 1445} 1446 1447static void damon_sysfs_targets_release(struct kobject *kobj) 1448{ 1449 kfree(container_of(kobj, struct damon_sysfs_targets, kobj)); 1450} 1451 1452static struct kobj_attribute damon_sysfs_targets_nr_attr = 1453 __ATTR_RW_MODE(nr_targets, 0600); 1454 1455static struct attribute *damon_sysfs_targets_attrs[] = { 1456 &damon_sysfs_targets_nr_attr.attr, 1457 NULL, 1458}; 1459ATTRIBUTE_GROUPS(damon_sysfs_targets); 1460 1461static struct kobj_type damon_sysfs_targets_ktype = { 1462 .release = damon_sysfs_targets_release, 1463 .sysfs_ops = &kobj_sysfs_ops, 1464 .default_groups = damon_sysfs_targets_groups, 1465}; 1466 1467/* 1468 * intervals directory 1469 */ 1470 1471struct damon_sysfs_intervals { 1472 struct kobject kobj; 1473 unsigned long sample_us; 1474 unsigned long aggr_us; 1475 unsigned long update_us; 1476}; 1477 1478static struct damon_sysfs_intervals *damon_sysfs_intervals_alloc( 1479 unsigned long sample_us, unsigned long aggr_us, 1480 unsigned long update_us) 1481{ 1482 struct damon_sysfs_intervals *intervals = kmalloc(sizeof(*intervals), 1483 GFP_KERNEL); 1484 1485 if (!intervals) 1486 return NULL; 1487 1488 intervals->kobj = (struct kobject){}; 1489 intervals->sample_us = 
sample_us; 1490 intervals->aggr_us = aggr_us; 1491 intervals->update_us = update_us; 1492 return intervals; 1493} 1494 1495static ssize_t sample_us_show(struct kobject *kobj, 1496 struct kobj_attribute *attr, char *buf) 1497{ 1498 struct damon_sysfs_intervals *intervals = container_of(kobj, 1499 struct damon_sysfs_intervals, kobj); 1500 1501 return sysfs_emit(buf, "%lu\n", intervals->sample_us); 1502} 1503 1504static ssize_t sample_us_store(struct kobject *kobj, 1505 struct kobj_attribute *attr, const char *buf, size_t count) 1506{ 1507 struct damon_sysfs_intervals *intervals = container_of(kobj, 1508 struct damon_sysfs_intervals, kobj); 1509 unsigned long us; 1510 int err = kstrtoul(buf, 0, &us); 1511 1512 if (err) 1513 return err; 1514 1515 intervals->sample_us = us; 1516 return count; 1517} 1518 1519static ssize_t aggr_us_show(struct kobject *kobj, struct kobj_attribute *attr, 1520 char *buf) 1521{ 1522 struct damon_sysfs_intervals *intervals = container_of(kobj, 1523 struct damon_sysfs_intervals, kobj); 1524 1525 return sysfs_emit(buf, "%lu\n", intervals->aggr_us); 1526} 1527 1528static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr, 1529 const char *buf, size_t count) 1530{ 1531 struct damon_sysfs_intervals *intervals = container_of(kobj, 1532 struct damon_sysfs_intervals, kobj); 1533 unsigned long us; 1534 int err = kstrtoul(buf, 0, &us); 1535 1536 if (err) 1537 return err; 1538 1539 intervals->aggr_us = us; 1540 return count; 1541} 1542 1543static ssize_t update_us_show(struct kobject *kobj, 1544 struct kobj_attribute *attr, char *buf) 1545{ 1546 struct damon_sysfs_intervals *intervals = container_of(kobj, 1547 struct damon_sysfs_intervals, kobj); 1548 1549 return sysfs_emit(buf, "%lu\n", intervals->update_us); 1550} 1551 1552static ssize_t update_us_store(struct kobject *kobj, 1553 struct kobj_attribute *attr, const char *buf, size_t count) 1554{ 1555 struct damon_sysfs_intervals *intervals = container_of(kobj, 1556 struct 
damon_sysfs_intervals, kobj); 1557 unsigned long us; 1558 int err = kstrtoul(buf, 0, &us); 1559 1560 if (err) 1561 return err; 1562 1563 intervals->update_us = us; 1564 return count; 1565} 1566 1567static void damon_sysfs_intervals_release(struct kobject *kobj) 1568{ 1569 kfree(container_of(kobj, struct damon_sysfs_intervals, kobj)); 1570} 1571 1572static struct kobj_attribute damon_sysfs_intervals_sample_us_attr = 1573 __ATTR_RW_MODE(sample_us, 0600); 1574 1575static struct kobj_attribute damon_sysfs_intervals_aggr_us_attr = 1576 __ATTR_RW_MODE(aggr_us, 0600); 1577 1578static struct kobj_attribute damon_sysfs_intervals_update_us_attr = 1579 __ATTR_RW_MODE(update_us, 0600); 1580 1581static struct attribute *damon_sysfs_intervals_attrs[] = { 1582 &damon_sysfs_intervals_sample_us_attr.attr, 1583 &damon_sysfs_intervals_aggr_us_attr.attr, 1584 &damon_sysfs_intervals_update_us_attr.attr, 1585 NULL, 1586}; 1587ATTRIBUTE_GROUPS(damon_sysfs_intervals); 1588 1589static struct kobj_type damon_sysfs_intervals_ktype = { 1590 .release = damon_sysfs_intervals_release, 1591 .sysfs_ops = &kobj_sysfs_ops, 1592 .default_groups = damon_sysfs_intervals_groups, 1593}; 1594 1595/* 1596 * monitoring_attrs directory 1597 */ 1598 1599struct damon_sysfs_attrs { 1600 struct kobject kobj; 1601 struct damon_sysfs_intervals *intervals; 1602 struct damon_sysfs_ul_range *nr_regions_range; 1603}; 1604 1605static struct damon_sysfs_attrs *damon_sysfs_attrs_alloc(void) 1606{ 1607 struct damon_sysfs_attrs *attrs = kmalloc(sizeof(*attrs), GFP_KERNEL); 1608 1609 if (!attrs) 1610 return NULL; 1611 attrs->kobj = (struct kobject){}; 1612 return attrs; 1613} 1614 1615static int damon_sysfs_attrs_add_dirs(struct damon_sysfs_attrs *attrs) 1616{ 1617 struct damon_sysfs_intervals *intervals; 1618 struct damon_sysfs_ul_range *nr_regions_range; 1619 int err; 1620 1621 intervals = damon_sysfs_intervals_alloc(5000, 100000, 60000000); 1622 if (!intervals) 1623 return -ENOMEM; 1624 1625 err = 
kobject_init_and_add(&intervals->kobj, 1626 &damon_sysfs_intervals_ktype, &attrs->kobj, 1627 "intervals"); 1628 if (err) 1629 goto put_intervals_out; 1630 attrs->intervals = intervals; 1631 1632 nr_regions_range = damon_sysfs_ul_range_alloc(10, 1000); 1633 if (!nr_regions_range) { 1634 err = -ENOMEM; 1635 goto put_intervals_out; 1636 } 1637 1638 err = kobject_init_and_add(&nr_regions_range->kobj, 1639 &damon_sysfs_ul_range_ktype, &attrs->kobj, 1640 "nr_regions"); 1641 if (err) 1642 goto put_nr_regions_intervals_out; 1643 attrs->nr_regions_range = nr_regions_range; 1644 return 0; 1645 1646put_nr_regions_intervals_out: 1647 kobject_put(&nr_regions_range->kobj); 1648 attrs->nr_regions_range = NULL; 1649put_intervals_out: 1650 kobject_put(&intervals->kobj); 1651 attrs->intervals = NULL; 1652 return err; 1653} 1654 1655static void damon_sysfs_attrs_rm_dirs(struct damon_sysfs_attrs *attrs) 1656{ 1657 kobject_put(&attrs->nr_regions_range->kobj); 1658 kobject_put(&attrs->intervals->kobj); 1659} 1660 1661static void damon_sysfs_attrs_release(struct kobject *kobj) 1662{ 1663 kfree(container_of(kobj, struct damon_sysfs_attrs, kobj)); 1664} 1665 1666static struct attribute *damon_sysfs_attrs_attrs[] = { 1667 NULL, 1668}; 1669ATTRIBUTE_GROUPS(damon_sysfs_attrs); 1670 1671static struct kobj_type damon_sysfs_attrs_ktype = { 1672 .release = damon_sysfs_attrs_release, 1673 .sysfs_ops = &kobj_sysfs_ops, 1674 .default_groups = damon_sysfs_attrs_groups, 1675}; 1676 1677/* 1678 * context directory 1679 */ 1680 1681/* This should match with enum damon_ops_id */ 1682static const char * const damon_sysfs_ops_strs[] = { 1683 "vaddr", 1684 "fvaddr", 1685 "paddr", 1686}; 1687 1688struct damon_sysfs_context { 1689 struct kobject kobj; 1690 enum damon_ops_id ops_id; 1691 struct damon_sysfs_attrs *attrs; 1692 struct damon_sysfs_targets *targets; 1693 struct damon_sysfs_schemes *schemes; 1694}; 1695 1696static struct damon_sysfs_context *damon_sysfs_context_alloc( 1697 enum damon_ops_id ops_id) 
1698{ 1699 struct damon_sysfs_context *context = kmalloc(sizeof(*context), 1700 GFP_KERNEL); 1701 1702 if (!context) 1703 return NULL; 1704 context->kobj = (struct kobject){}; 1705 context->ops_id = ops_id; 1706 return context; 1707} 1708 1709static int damon_sysfs_context_set_attrs(struct damon_sysfs_context *context) 1710{ 1711 struct damon_sysfs_attrs *attrs = damon_sysfs_attrs_alloc(); 1712 int err; 1713 1714 if (!attrs) 1715 return -ENOMEM; 1716 err = kobject_init_and_add(&attrs->kobj, &damon_sysfs_attrs_ktype, 1717 &context->kobj, "monitoring_attrs"); 1718 if (err) 1719 goto out; 1720 err = damon_sysfs_attrs_add_dirs(attrs); 1721 if (err) 1722 goto out; 1723 context->attrs = attrs; 1724 return 0; 1725 1726out: 1727 kobject_put(&attrs->kobj); 1728 return err; 1729} 1730 1731static int damon_sysfs_context_set_targets(struct damon_sysfs_context *context) 1732{ 1733 struct damon_sysfs_targets *targets = damon_sysfs_targets_alloc(); 1734 int err; 1735 1736 if (!targets) 1737 return -ENOMEM; 1738 err = kobject_init_and_add(&targets->kobj, &damon_sysfs_targets_ktype, 1739 &context->kobj, "targets"); 1740 if (err) { 1741 kobject_put(&targets->kobj); 1742 return err; 1743 } 1744 context->targets = targets; 1745 return 0; 1746} 1747 1748static int damon_sysfs_context_set_schemes(struct damon_sysfs_context *context) 1749{ 1750 struct damon_sysfs_schemes *schemes = damon_sysfs_schemes_alloc(); 1751 int err; 1752 1753 if (!schemes) 1754 return -ENOMEM; 1755 err = kobject_init_and_add(&schemes->kobj, &damon_sysfs_schemes_ktype, 1756 &context->kobj, "schemes"); 1757 if (err) { 1758 kobject_put(&schemes->kobj); 1759 return err; 1760 } 1761 context->schemes = schemes; 1762 return 0; 1763} 1764 1765static int damon_sysfs_context_add_dirs(struct damon_sysfs_context *context) 1766{ 1767 int err; 1768 1769 err = damon_sysfs_context_set_attrs(context); 1770 if (err) 1771 return err; 1772 1773 err = damon_sysfs_context_set_targets(context); 1774 if (err) 1775 goto put_attrs_out; 
1776 1777 err = damon_sysfs_context_set_schemes(context); 1778 if (err) 1779 goto put_targets_attrs_out; 1780 return 0; 1781 1782put_targets_attrs_out: 1783 kobject_put(&context->targets->kobj); 1784 context->targets = NULL; 1785put_attrs_out: 1786 kobject_put(&context->attrs->kobj); 1787 context->attrs = NULL; 1788 return err; 1789} 1790 1791static void damon_sysfs_context_rm_dirs(struct damon_sysfs_context *context) 1792{ 1793 damon_sysfs_attrs_rm_dirs(context->attrs); 1794 kobject_put(&context->attrs->kobj); 1795 damon_sysfs_targets_rm_dirs(context->targets); 1796 kobject_put(&context->targets->kobj); 1797 damon_sysfs_schemes_rm_dirs(context->schemes); 1798 kobject_put(&context->schemes->kobj); 1799} 1800 1801static ssize_t avail_operations_show(struct kobject *kobj, 1802 struct kobj_attribute *attr, char *buf) 1803{ 1804 enum damon_ops_id id; 1805 int len = 0; 1806 1807 for (id = 0; id < NR_DAMON_OPS; id++) { 1808 if (!damon_is_registered_ops(id)) 1809 continue; 1810 len += sysfs_emit_at(buf, len, "%s\n", 1811 damon_sysfs_ops_strs[id]); 1812 } 1813 return len; 1814} 1815 1816static ssize_t operations_show(struct kobject *kobj, 1817 struct kobj_attribute *attr, char *buf) 1818{ 1819 struct damon_sysfs_context *context = container_of(kobj, 1820 struct damon_sysfs_context, kobj); 1821 1822 return sysfs_emit(buf, "%s\n", damon_sysfs_ops_strs[context->ops_id]); 1823} 1824 1825static ssize_t operations_store(struct kobject *kobj, 1826 struct kobj_attribute *attr, const char *buf, size_t count) 1827{ 1828 struct damon_sysfs_context *context = container_of(kobj, 1829 struct damon_sysfs_context, kobj); 1830 enum damon_ops_id id; 1831 1832 for (id = 0; id < NR_DAMON_OPS; id++) { 1833 if (sysfs_streq(buf, damon_sysfs_ops_strs[id])) { 1834 context->ops_id = id; 1835 return count; 1836 } 1837 } 1838 return -EINVAL; 1839} 1840 1841static void damon_sysfs_context_release(struct kobject *kobj) 1842{ 1843 kfree(container_of(kobj, struct damon_sysfs_context, kobj)); 1844} 1845 
1846static struct kobj_attribute damon_sysfs_context_avail_operations_attr = 1847 __ATTR_RO_MODE(avail_operations, 0400); 1848 1849static struct kobj_attribute damon_sysfs_context_operations_attr = 1850 __ATTR_RW_MODE(operations, 0600); 1851 1852static struct attribute *damon_sysfs_context_attrs[] = { 1853 &damon_sysfs_context_avail_operations_attr.attr, 1854 &damon_sysfs_context_operations_attr.attr, 1855 NULL, 1856}; 1857ATTRIBUTE_GROUPS(damon_sysfs_context); 1858 1859static struct kobj_type damon_sysfs_context_ktype = { 1860 .release = damon_sysfs_context_release, 1861 .sysfs_ops = &kobj_sysfs_ops, 1862 .default_groups = damon_sysfs_context_groups, 1863}; 1864 1865/* 1866 * contexts directory 1867 */ 1868 1869struct damon_sysfs_contexts { 1870 struct kobject kobj; 1871 struct damon_sysfs_context **contexts_arr; 1872 int nr; 1873}; 1874 1875static struct damon_sysfs_contexts *damon_sysfs_contexts_alloc(void) 1876{ 1877 return kzalloc(sizeof(struct damon_sysfs_contexts), GFP_KERNEL); 1878} 1879 1880static void damon_sysfs_contexts_rm_dirs(struct damon_sysfs_contexts *contexts) 1881{ 1882 struct damon_sysfs_context **contexts_arr = contexts->contexts_arr; 1883 int i; 1884 1885 for (i = 0; i < contexts->nr; i++) { 1886 damon_sysfs_context_rm_dirs(contexts_arr[i]); 1887 kobject_put(&contexts_arr[i]->kobj); 1888 } 1889 contexts->nr = 0; 1890 kfree(contexts_arr); 1891 contexts->contexts_arr = NULL; 1892} 1893 1894static int damon_sysfs_contexts_add_dirs(struct damon_sysfs_contexts *contexts, 1895 int nr_contexts) 1896{ 1897 struct damon_sysfs_context **contexts_arr, *context; 1898 int err, i; 1899 1900 damon_sysfs_contexts_rm_dirs(contexts); 1901 if (!nr_contexts) 1902 return 0; 1903 1904 contexts_arr = kmalloc_array(nr_contexts, sizeof(*contexts_arr), 1905 GFP_KERNEL | __GFP_NOWARN); 1906 if (!contexts_arr) 1907 return -ENOMEM; 1908 contexts->contexts_arr = contexts_arr; 1909 1910 for (i = 0; i < nr_contexts; i++) { 1911 context = 
damon_sysfs_context_alloc(DAMON_OPS_VADDR); 1912 if (!context) { 1913 damon_sysfs_contexts_rm_dirs(contexts); 1914 return -ENOMEM; 1915 } 1916 1917 err = kobject_init_and_add(&context->kobj, 1918 &damon_sysfs_context_ktype, &contexts->kobj, 1919 "%d", i); 1920 if (err) 1921 goto out; 1922 1923 err = damon_sysfs_context_add_dirs(context); 1924 if (err) 1925 goto out; 1926 1927 contexts_arr[i] = context; 1928 contexts->nr++; 1929 } 1930 return 0; 1931 1932out: 1933 damon_sysfs_contexts_rm_dirs(contexts); 1934 kobject_put(&context->kobj); 1935 return err; 1936} 1937 1938static ssize_t nr_contexts_show(struct kobject *kobj, 1939 struct kobj_attribute *attr, char *buf) 1940{ 1941 struct damon_sysfs_contexts *contexts = container_of(kobj, 1942 struct damon_sysfs_contexts, kobj); 1943 1944 return sysfs_emit(buf, "%d\n", contexts->nr); 1945} 1946 1947static ssize_t nr_contexts_store(struct kobject *kobj, 1948 struct kobj_attribute *attr, const char *buf, size_t count) 1949{ 1950 struct damon_sysfs_contexts *contexts; 1951 int nr, err; 1952 1953 err = kstrtoint(buf, 0, &nr); 1954 if (err) 1955 return err; 1956 /* TODO: support multiple contexts per kdamond */ 1957 if (nr < 0 || 1 < nr) 1958 return -EINVAL; 1959 1960 contexts = container_of(kobj, struct damon_sysfs_contexts, kobj); 1961 if (!mutex_trylock(&damon_sysfs_lock)) 1962 return -EBUSY; 1963 err = damon_sysfs_contexts_add_dirs(contexts, nr); 1964 mutex_unlock(&damon_sysfs_lock); 1965 if (err) 1966 return err; 1967 1968 return count; 1969} 1970 1971static void damon_sysfs_contexts_release(struct kobject *kobj) 1972{ 1973 kfree(container_of(kobj, struct damon_sysfs_contexts, kobj)); 1974} 1975 1976static struct kobj_attribute damon_sysfs_contexts_nr_attr 1977 = __ATTR_RW_MODE(nr_contexts, 0600); 1978 1979static struct attribute *damon_sysfs_contexts_attrs[] = { 1980 &damon_sysfs_contexts_nr_attr.attr, 1981 NULL, 1982}; 1983ATTRIBUTE_GROUPS(damon_sysfs_contexts); 1984 1985static struct kobj_type 
damon_sysfs_contexts_ktype = { 1986 .release = damon_sysfs_contexts_release, 1987 .sysfs_ops = &kobj_sysfs_ops, 1988 .default_groups = damon_sysfs_contexts_groups, 1989}; 1990 1991/* 1992 * kdamond directory 1993 */ 1994 1995struct damon_sysfs_kdamond { 1996 struct kobject kobj; 1997 struct damon_sysfs_contexts *contexts; 1998 struct damon_ctx *damon_ctx; 1999}; 2000 2001static struct damon_sysfs_kdamond *damon_sysfs_kdamond_alloc(void) 2002{ 2003 return kzalloc(sizeof(struct damon_sysfs_kdamond), GFP_KERNEL); 2004} 2005 2006static int damon_sysfs_kdamond_add_dirs(struct damon_sysfs_kdamond *kdamond) 2007{ 2008 struct damon_sysfs_contexts *contexts; 2009 int err; 2010 2011 contexts = damon_sysfs_contexts_alloc(); 2012 if (!contexts) 2013 return -ENOMEM; 2014 2015 err = kobject_init_and_add(&contexts->kobj, 2016 &damon_sysfs_contexts_ktype, &kdamond->kobj, 2017 "contexts"); 2018 if (err) { 2019 kobject_put(&contexts->kobj); 2020 return err; 2021 } 2022 kdamond->contexts = contexts; 2023 2024 return err; 2025} 2026 2027static void damon_sysfs_kdamond_rm_dirs(struct damon_sysfs_kdamond *kdamond) 2028{ 2029 damon_sysfs_contexts_rm_dirs(kdamond->contexts); 2030 kobject_put(&kdamond->contexts->kobj); 2031} 2032 2033static bool damon_sysfs_ctx_running(struct damon_ctx *ctx) 2034{ 2035 bool running; 2036 2037 mutex_lock(&ctx->kdamond_lock); 2038 running = ctx->kdamond != NULL; 2039 mutex_unlock(&ctx->kdamond_lock); 2040 return running; 2041} 2042 2043/* 2044 * enum damon_sysfs_cmd - Commands for a specific kdamond. 2045 */ 2046enum damon_sysfs_cmd { 2047 /* @DAMON_SYSFS_CMD_ON: Turn the kdamond on. */ 2048 DAMON_SYSFS_CMD_ON, 2049 /* @DAMON_SYSFS_CMD_OFF: Turn the kdamond off. */ 2050 DAMON_SYSFS_CMD_OFF, 2051 /* @DAMON_SYSFS_CMD_COMMIT: Update kdamond inputs. */ 2052 DAMON_SYSFS_CMD_COMMIT, 2053 /* 2054 * @DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: Update scheme stats sysfs 2055 * files. 
2056 */ 2057 DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS, 2058 /* 2059 * @NR_DAMON_SYSFS_CMDS: Total number of DAMON sysfs commands. 2060 */ 2061 NR_DAMON_SYSFS_CMDS, 2062}; 2063 2064/* Should match with enum damon_sysfs_cmd */ 2065static const char * const damon_sysfs_cmd_strs[] = { 2066 "on", 2067 "off", 2068 "commit", 2069 "update_schemes_stats", 2070}; 2071 2072/* 2073 * struct damon_sysfs_cmd_request - A request to the DAMON callback. 2074 * @cmd: The command that needs to be handled by the callback. 2075 * @kdamond: The kobject wrapper that associated to the kdamond thread. 2076 * 2077 * This structure represents a sysfs command request that need to access some 2078 * DAMON context-internal data. Because DAMON context-internal data can be 2079 * safely accessed from DAMON callbacks without additional synchronization, the 2080 * request will be handled by the DAMON callback. None-``NULL`` @kdamond means 2081 * the request is valid. 2082 */ 2083struct damon_sysfs_cmd_request { 2084 enum damon_sysfs_cmd cmd; 2085 struct damon_sysfs_kdamond *kdamond; 2086}; 2087 2088/* Current DAMON callback request. Protected by damon_sysfs_lock. */ 2089static struct damon_sysfs_cmd_request damon_sysfs_cmd_request; 2090 2091static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 2092 char *buf) 2093{ 2094 struct damon_sysfs_kdamond *kdamond = container_of(kobj, 2095 struct damon_sysfs_kdamond, kobj); 2096 struct damon_ctx *ctx = kdamond->damon_ctx; 2097 bool running; 2098 2099 if (!ctx) 2100 running = false; 2101 else 2102 running = damon_sysfs_ctx_running(ctx); 2103 2104 return sysfs_emit(buf, "%s\n", running ? 
2105 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_ON] : 2106 damon_sysfs_cmd_strs[DAMON_SYSFS_CMD_OFF]); 2107} 2108 2109static int damon_sysfs_set_attrs(struct damon_ctx *ctx, 2110 struct damon_sysfs_attrs *sys_attrs) 2111{ 2112 struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals; 2113 struct damon_sysfs_ul_range *sys_nr_regions = 2114 sys_attrs->nr_regions_range; 2115 struct damon_attrs attrs = { 2116 .sample_interval = sys_intervals->sample_us, 2117 .aggr_interval = sys_intervals->aggr_us, 2118 .ops_update_interval = sys_intervals->update_us, 2119 .min_nr_regions = sys_nr_regions->min, 2120 .max_nr_regions = sys_nr_regions->max, 2121 }; 2122 return damon_set_attrs(ctx, &attrs); 2123} 2124 2125static void damon_sysfs_destroy_targets(struct damon_ctx *ctx) 2126{ 2127 struct damon_target *t, *next; 2128 bool has_pid = damon_target_has_pid(ctx); 2129 2130 damon_for_each_target_safe(t, next, ctx) { 2131 if (has_pid) 2132 put_pid(t->pid); 2133 damon_destroy_target(t); 2134 } 2135} 2136 2137static int damon_sysfs_set_regions(struct damon_target *t, 2138 struct damon_sysfs_regions *sysfs_regions) 2139{ 2140 struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr, 2141 sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); 2142 int i, err = -EINVAL; 2143 2144 if (!ranges) 2145 return -ENOMEM; 2146 for (i = 0; i < sysfs_regions->nr; i++) { 2147 struct damon_sysfs_region *sys_region = 2148 sysfs_regions->regions_arr[i]; 2149 2150 if (sys_region->start > sys_region->end) 2151 goto out; 2152 2153 ranges[i].start = sys_region->start; 2154 ranges[i].end = sys_region->end; 2155 if (i == 0) 2156 continue; 2157 if (ranges[i - 1].end > ranges[i].start) 2158 goto out; 2159 } 2160 err = damon_set_regions(t, ranges, sysfs_regions->nr); 2161out: 2162 kfree(ranges); 2163 return err; 2164 2165} 2166 2167static int damon_sysfs_add_target(struct damon_sysfs_target *sys_target, 2168 struct damon_ctx *ctx) 2169{ 2170 struct damon_target *t = damon_new_target(); 2171 int err = -EINVAL; 
2172 2173 if (!t) 2174 return -ENOMEM; 2175 damon_add_target(ctx, t); 2176 if (damon_target_has_pid(ctx)) { 2177 t->pid = find_get_pid(sys_target->pid); 2178 if (!t->pid) 2179 goto destroy_targets_out; 2180 } 2181 err = damon_sysfs_set_regions(t, sys_target->regions); 2182 if (err) 2183 goto destroy_targets_out; 2184 return 0; 2185 2186destroy_targets_out: 2187 damon_sysfs_destroy_targets(ctx); 2188 return err; 2189} 2190 2191/* 2192 * Search a target in a context that corresponds to the sysfs target input. 2193 * 2194 * Return: pointer to the target if found, NULL if not found, or negative 2195 * error code if the search failed. 2196 */ 2197static struct damon_target *damon_sysfs_existing_target( 2198 struct damon_sysfs_target *sys_target, struct damon_ctx *ctx) 2199{ 2200 struct pid *pid; 2201 struct damon_target *t; 2202 2203 if (!damon_target_has_pid(ctx)) { 2204 /* Up to only one target for paddr could exist */ 2205 damon_for_each_target(t, ctx) 2206 return t; 2207 return NULL; 2208 } 2209 2210 /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */ 2211 pid = find_get_pid(sys_target->pid); 2212 if (!pid) 2213 return ERR_PTR(-EINVAL); 2214 damon_for_each_target(t, ctx) { 2215 if (t->pid == pid) { 2216 put_pid(pid); 2217 return t; 2218 } 2219 } 2220 put_pid(pid); 2221 return NULL; 2222} 2223 2224static int damon_sysfs_set_targets(struct damon_ctx *ctx, 2225 struct damon_sysfs_targets *sysfs_targets) 2226{ 2227 int i, err; 2228 2229 /* Multiple physical address space monitoring targets makes no sense */ 2230 if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1) 2231 return -EINVAL; 2232 2233 for (i = 0; i < sysfs_targets->nr; i++) { 2234 struct damon_sysfs_target *st = sysfs_targets->targets_arr[i]; 2235 struct damon_target *t = damon_sysfs_existing_target(st, ctx); 2236 2237 if (IS_ERR(t)) 2238 return PTR_ERR(t); 2239 if (!t) 2240 err = damon_sysfs_add_target(st, ctx); 2241 else 2242 err = damon_sysfs_set_regions(t, st->regions); 2243 if (err) 2244 
			return err;
	}
	return 0;
}

/*
 * Allocate and initialize a DAMON scheme (struct damos) from the user inputs
 * staged under the given sysfs scheme directory.  Returns NULL when the
 * allocation inside damon_new_scheme() fails.
 */
static struct damos *damon_sysfs_mk_scheme(
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;

	/* target access pattern: size, access frequency, and age ranges */
	struct damos_access_pattern pattern = {
		.min_sz_region = access_pattern->sz->min,
		.max_sz_region = access_pattern->sz->max,
		.min_nr_accesses = access_pattern->nr_accesses->min,
		.max_nr_accesses = access_pattern->nr_accesses->max,
		.min_age_region = access_pattern->age->min,
		.max_age_region = access_pattern->age->max,
	};
	struct damos_quota quota = {
		.ms = sysfs_quotas->ms,
		.sz = sysfs_quotas->sz,
		.reset_interval = sysfs_quotas->reset_interval_ms,
		.weight_sz = sysfs_weights->sz,
		.weight_nr_accesses = sysfs_weights->nr_accesses,
		.weight_age = sysfs_weights->age,
	};
	struct damos_watermarks wmarks = {
		.metric = sysfs_wmarks->metric,
		.interval = sysfs_wmarks->interval_us,
		.high = sysfs_wmarks->high,
		.mid = sysfs_wmarks->mid,
		.low = sysfs_wmarks->low,
	};

	return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
			&wmarks);
}

/*
 * Overwrite every parameter of an existing scheme with the current sysfs
 * inputs.  Used for online updates of a running kdamond, so no allocation is
 * involved and the scheme's collected stats are preserved.
 */
static void damon_sysfs_update_scheme(struct damos *scheme,
		struct damon_sysfs_scheme *sysfs_scheme)
{
	struct damon_sysfs_access_pattern *access_pattern =
		sysfs_scheme->access_pattern;
	struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
	struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
	struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;

	scheme->pattern.min_sz_region = access_pattern->sz->min;
	scheme->pattern.max_sz_region = access_pattern->sz->max;
	scheme->pattern.min_nr_accesses = access_pattern->nr_accesses->min;
	scheme->pattern.max_nr_accesses = access_pattern->nr_accesses->max;
	scheme->pattern.min_age_region = access_pattern->age->min;
	scheme->pattern.max_age_region = access_pattern->age->max;

	scheme->action = sysfs_scheme->action;

	scheme->quota.ms = sysfs_quotas->ms;
	scheme->quota.sz = sysfs_quotas->sz;
	scheme->quota.reset_interval = sysfs_quotas->reset_interval_ms;
	scheme->quota.weight_sz = sysfs_weights->sz;
	scheme->quota.weight_nr_accesses = sysfs_weights->nr_accesses;
	scheme->quota.weight_age = sysfs_weights->age;

	scheme->wmarks.metric = sysfs_wmarks->metric;
	scheme->wmarks.interval = sysfs_wmarks->interval_us;
	scheme->wmarks.high = sysfs_wmarks->high;
	scheme->wmarks.mid = sysfs_wmarks->mid;
	scheme->wmarks.low = sysfs_wmarks->low;
}

/*
 * Apply the sysfs schemes inputs to @ctx: schemes that already exist are
 * updated in place, surplus existing schemes are destroyed, and additional
 * sysfs inputs get newly allocated schemes.  On allocation failure, all
 * schemes of @ctx are destroyed and -ENOMEM is returned, leaving no
 * half-committed scheme list behind.
 */
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
		struct damon_sysfs_schemes *sysfs_schemes)
{
	struct damos *scheme, *next;
	int i = 0;

	/* update (or drop) the schemes that the context already has */
	damon_for_each_scheme_safe(scheme, next, ctx) {
		if (i < sysfs_schemes->nr)
			damon_sysfs_update_scheme(scheme,
					sysfs_schemes->schemes_arr[i]);
		else
			damon_destroy_scheme(scheme);
		i++;
	}

	/* create schemes for the remaining sysfs inputs */
	for (; i < sysfs_schemes->nr; i++) {
		/* deliberately shadows the outer iteration cursors */
		struct damos *scheme, *next;

		scheme = damon_sysfs_mk_scheme(sysfs_schemes->schemes_arr[i]);
		if (!scheme) {
			damon_for_each_scheme_safe(scheme, next, ctx)
				damon_destroy_scheme(scheme);
			return -ENOMEM;
		}
		damon_add_scheme(ctx, scheme);
	}
	return 0;
}

/*
 * DAMON callback that runs just before a kdamond terminates.  For contexts
 * whose targets hold pid references (per damon_target_has_pid()), drop those
 * references and destroy the targets so the pids are not leaked.
 */
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
	struct damon_target *t, *next;

	if (!damon_target_has_pid(ctx))
		return;

	mutex_lock(&ctx->kdamond_lock);
	damon_for_each_target_safe(t, next, ctx) {
		put_pid(t->pid);
		damon_destroy_target(t);
	}
	mutex_unlock(&ctx->kdamond_lock);
}

/*
 * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files.
 * @kdamond:	The kobject wrapper that associated to the kdamond thread.
 *
 * This function reads the schemes stats of specific kdamond and update the
 * related values for sysfs files.  This function should be called from DAMON
 * callbacks while holding ``damon_sysfs_lock``, to safely access the DAMON
 * contexts-internal data and DAMON sysfs variables.
 *
 * Return: 0 on success, -EINVAL if the kdamond has no DAMON context.
 */
static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx = kdamond->damon_ctx;
	struct damon_sysfs_schemes *sysfs_schemes;
	struct damos *scheme;
	int schemes_idx = 0;

	if (!ctx)
		return -EINVAL;
	sysfs_schemes = kdamond->contexts->contexts_arr[0]->schemes;
	damon_for_each_scheme(scheme, ctx) {
		struct damon_sysfs_stats *sysfs_stats;

		/* user could have removed the scheme sysfs dir */
		if (schemes_idx >= sysfs_schemes->nr)
			break;

		sysfs_stats = sysfs_schemes->schemes_arr[schemes_idx++]->stats;
		sysfs_stats->nr_tried = scheme->stat.nr_tried;
		sysfs_stats->sz_tried = scheme->stat.sz_tried;
		sysfs_stats->nr_applied = scheme->stat.nr_applied;
		sysfs_stats->sz_applied = scheme->stat.sz_applied;
		sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds;
	}
	return 0;
}

/* Whether the kdamond wrapped by @kdamond is currently running */
static inline bool damon_sysfs_kdamond_running(
		struct damon_sysfs_kdamond *kdamond)
{
	return kdamond->damon_ctx &&
		damon_sysfs_ctx_running(kdamond->damon_ctx);
}

/*
 * Apply all user inputs under the sysfs context directory (operations set,
 * monitoring attributes, targets, and schemes) to @ctx, in that order.
 * Returns 0 on success or the first error from the sub-steps.
 */
static int damon_sysfs_apply_inputs(struct damon_ctx *ctx,
		struct damon_sysfs_context *sys_ctx)
{
	int err;

	err = damon_select_ops(ctx, sys_ctx->ops_id);
	if (err)
		return err;
	err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs);
	if (err)
		return err;
	err = damon_sysfs_set_targets(ctx, sys_ctx->targets);
	if (err)
		return err;
	return damon_sysfs_set_schemes(ctx, sys_ctx->schemes);
}

/*
 * damon_sysfs_commit_input() - Commit user inputs to a running kdamond.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * If the sysfs input is wrong, the kdamond will be terminated.
 */
static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
{
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	/* TODO: Support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	return damon_sysfs_apply_inputs(kdamond->damon_ctx,
			kdamond->contexts->contexts_arr[0]);
}

/*
 * damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
 * @c:	The DAMON context of the callback.
 *
 * This function is periodically called back from the kdamond thread for @c.
 * Then, it checks if there is a waiting DAMON sysfs request and handles it.
 */
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
	struct damon_sysfs_kdamond *kdamond;
	int err = 0;

	/* avoid deadlock due to concurrent state_store('off') */
	if (!mutex_trylock(&damon_sysfs_lock))
		return 0;
	kdamond = damon_sysfs_cmd_request.kdamond;
	/* only handle a request aimed at this kdamond's context */
	if (!kdamond || kdamond->damon_ctx != c)
		goto out;
	switch (damon_sysfs_cmd_request.cmd) {
	case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS:
		err = damon_sysfs_upd_schemes_stats(kdamond);
		break;
	case DAMON_SYSFS_CMD_COMMIT:
		err = damon_sysfs_commit_input(kdamond);
		break;
	default:
		break;
	}
	/* Mark the request as invalid now. */
	damon_sysfs_cmd_request.kdamond = NULL;
out:
	mutex_unlock(&damon_sysfs_lock);
	return err;
}

/*
 * Allocate a DAMON context, apply the sysfs inputs to it, and install the
 * sysfs command-handling and termination callbacks.  Returns an ERR_PTR on
 * allocation or input-application failure.
 */
static struct damon_ctx *damon_sysfs_build_ctx(
		struct damon_sysfs_context *sys_ctx)
{
	struct damon_ctx *ctx = damon_new_ctx();
	int err;

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	err = damon_sysfs_apply_inputs(ctx, sys_ctx);
	if (err) {
		damon_destroy_ctx(ctx);
		return ERR_PTR(err);
	}

	ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
	ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
	ctx->callback.before_terminate = damon_sysfs_before_terminate;
	return ctx;
}

/*
 * Handle the 'on' state command: build a fresh DAMON context from the sysfs
 * inputs and start a kdamond for it.  Any context left over from a previous,
 * already turned-off run is destroyed first (see damon_sysfs_turn_damon_off()
 * for why it may still exist).
 */
static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
{
	struct damon_ctx *ctx;
	int err;

	if (damon_sysfs_kdamond_running(kdamond))
		return -EBUSY;
	if (damon_sysfs_cmd_request.kdamond == kdamond)
		return -EBUSY;
	/* TODO: support multiple contexts per kdamond */
	if (kdamond->contexts->nr != 1)
		return -EINVAL;

	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kdamond->damon_ctx = NULL;

	ctx = damon_sysfs_build_ctx(kdamond->contexts->contexts_arr[0]);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	err = damon_start(&ctx, 1, false);
	if (err) {
		damon_destroy_ctx(ctx);
		return err;
	}
	kdamond->damon_ctx = ctx;
	return err;
}

/* Handle the 'off' state command: stop the running kdamond. */
static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond)
{
	if (!kdamond->damon_ctx)
		return -EINVAL;
	return damon_stop(&kdamond->damon_ctx, 1);
	/*
	 * To allow users show final monitoring results of already turned-off
	 * DAMON, we free kdamond->damon_ctx in next
	 * damon_sysfs_turn_damon_on(), or kdamonds_nr_store()
	 */
}

/*
 * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond.
 * @cmd:	The command to handle.
 * @kdamond:	The kobject wrapper for the associated kdamond.
 *
 * This function handles a DAMON sysfs command for a kdamond.  For commands
 * that need to access running DAMON context-internal data, it requests
 * handling of the command to the DAMON callback
 * (@damon_sysfs_cmd_request_callback()) and wait until it is properly handled,
 * or the context is completed.
 *
 * Caller must hold damon_sysfs_lock; the lock is temporarily dropped and
 * re-acquired while polling for the callback to finish.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
		struct damon_sysfs_kdamond *kdamond)
{
	bool need_wait = true;

	/* Handle commands that doesn't access DAMON context-internal data */
	switch (cmd) {
	case DAMON_SYSFS_CMD_ON:
		return damon_sysfs_turn_damon_on(kdamond);
	case DAMON_SYSFS_CMD_OFF:
		return damon_sysfs_turn_damon_off(kdamond);
	default:
		break;
	}

	/* Pass the command to DAMON callback for safe DAMON context access */
	if (damon_sysfs_cmd_request.kdamond)
		return -EBUSY;
	if (!damon_sysfs_kdamond_running(kdamond))
		return -EINVAL;
	damon_sysfs_cmd_request.cmd = cmd;
	damon_sysfs_cmd_request.kdamond = kdamond;

	/*
	 * wait until damon_sysfs_cmd_request_callback() handles the request
	 * from kdamond context
	 */
	mutex_unlock(&damon_sysfs_lock);
	while (need_wait) {
		schedule_timeout_idle(msecs_to_jiffies(100));
		if (!mutex_trylock(&damon_sysfs_lock))
			continue;
		if (!damon_sysfs_cmd_request.kdamond) {
			/* damon_sysfs_cmd_request_callback() handled */
			need_wait = false;
		} else if (!damon_sysfs_kdamond_running(kdamond)) {
			/* kdamond has already finished */
			need_wait = false;
			damon_sysfs_cmd_request.kdamond = NULL;
		}
		mutex_unlock(&damon_sysfs_lock);
	}
	/* re-acquire for the caller, which expects to still hold the lock */
	mutex_lock(&damon_sysfs_lock);
	return 0;
}

/*
 * 'state' file write handler.  Matches the written string against the known
 * command names and dispatches it.  Uses trylock so that a writer never
 * blocks against a kdamond callback holding damon_sysfs_lock (-EBUSY
 * instead).
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	enum damon_sysfs_cmd cmd;
	ssize_t ret = -EINVAL;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	for (cmd = 0; cmd < NR_DAMON_SYSFS_CMDS; cmd++) {
		if (sysfs_streq(buf, damon_sysfs_cmd_strs[cmd])) {
			ret = damon_sysfs_handle_cmd(cmd, kdamond);
			break;
		}
	}
	mutex_unlock(&damon_sysfs_lock);
	if (!ret)
		ret = count;
	return ret;
}

/*
 * 'pid' file read handler.  Shows the pid of the kdamond thread, or -1 when
 * no kdamond is running for this wrapper.
 */
static ssize_t pid_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);
	struct damon_ctx *ctx;
	int pid = -1;

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	ctx = kdamond->damon_ctx;
	if (!ctx)
		goto out;

	mutex_lock(&ctx->kdamond_lock);
	if (ctx->kdamond)
		pid = ctx->kdamond->pid;
	mutex_unlock(&ctx->kdamond_lock);
out:
	mutex_unlock(&damon_sysfs_lock);
	return sysfs_emit(buf, "%d\n", pid);
}

/* kobject release: runs when the refcount drops to zero */
static void damon_sysfs_kdamond_release(struct kobject *kobj)
{
	struct damon_sysfs_kdamond *kdamond = container_of(kobj,
			struct damon_sysfs_kdamond, kobj);

	/* free a context kept around for post-mortem stats reading */
	if (kdamond->damon_ctx)
		damon_destroy_ctx(kdamond->damon_ctx);
	kfree(kdamond);
}

static struct kobj_attribute damon_sysfs_kdamond_state_attr =
		__ATTR_RW_MODE(state, 0600);

static struct kobj_attribute damon_sysfs_kdamond_pid_attr =
		__ATTR_RO_MODE(pid, 0400);

static struct attribute *damon_sysfs_kdamond_attrs[] = {
	&damon_sysfs_kdamond_state_attr.attr,
	&damon_sysfs_kdamond_pid_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);

static struct kobj_type damon_sysfs_kdamond_ktype = {
	.release = damon_sysfs_kdamond_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamond_groups,
};

/*
 * kdamonds directory
 */

struct damon_sysfs_kdamonds {
	struct kobject kobj;
	struct damon_sysfs_kdamond **kdamonds_arr;	/* 'nr' child dirs */
	int nr;
};

static struct damon_sysfs_kdamonds *damon_sysfs_kdamonds_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_kdamonds), GFP_KERNEL);
}

/* Drop all child kdamond directories and the array holding them. */
static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr = kdamonds->kdamonds_arr;
	int i;

	for (i = 0; i < kdamonds->nr; i++) {
		damon_sysfs_kdamond_rm_dirs(kdamonds_arr[i]);
		kobject_put(&kdamonds_arr[i]->kobj);
	}
	kdamonds->nr = 0;
	kfree(kdamonds_arr);
	kdamonds->kdamonds_arr = NULL;
}

/*
 * Whether any of the given kdamonds is running or is the target of a pending
 * sysfs command request, in which case the directories must not be rebuilt.
 */
static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
		int nr_kdamonds)
{
	int i;

	for (i = 0; i < nr_kdamonds; i++) {
		if (damon_sysfs_kdamond_running(kdamonds[i]) ||
		    damon_sysfs_cmd_request.kdamond == kdamonds[i])
			return true;
	}

	return false;
}

/*
 * Replace the child kdamond directories with @nr_kdamonds fresh ones, named
 * '0' .. 'nr_kdamonds - 1'.  Fails with -EBUSY if any existing kdamond is
 * busy.  On mid-loop failure, all directories created so far are removed.
 */
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
		int nr_kdamonds)
{
	struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
	int err, i;

	if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
		return -EBUSY;

	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	if (!nr_kdamonds)
		return 0;

	kdamonds_arr = kmalloc_array(nr_kdamonds, sizeof(*kdamonds_arr),
			GFP_KERNEL | __GFP_NOWARN);
	if (!kdamonds_arr)
		return -ENOMEM;
	kdamonds->kdamonds_arr = kdamonds_arr;

	for (i = 0; i < nr_kdamonds; i++) {
		kdamond = damon_sysfs_kdamond_alloc();
		if (!kdamond) {
			damon_sysfs_kdamonds_rm_dirs(kdamonds);
			return -ENOMEM;
		}

		err = kobject_init_and_add(&kdamond->kobj,
				&damon_sysfs_kdamond_ktype, &kdamonds->kobj,
				"%d", i);
		if (err)
			goto out;

		err = damon_sysfs_kdamond_add_dirs(kdamond);
		if (err)
			goto out;

		kdamonds_arr[i] = kdamond;
		kdamonds->nr++;
	}
	return 0;

out:
	damon_sysfs_kdamonds_rm_dirs(kdamonds);
	/* the failing kdamond is not counted in ->nr yet; put it separately */
	kobject_put(&kdamond->kobj);
	return err;
}

static ssize_t nr_kdamonds_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
			struct damon_sysfs_kdamonds, kobj);

	return sysfs_emit(buf, "%d\n", kdamonds->nr);
}

/* 'nr_kdamonds' write handler: rebuilds the kdamond directories. */
static ssize_t nr_kdamonds_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int nr, err;

	err = kstrtoint(buf, 0, &nr);
	if (err)
		return err;
	if (nr < 0)
		return -EINVAL;

	kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);

	if (!mutex_trylock(&damon_sysfs_lock))
		return -EBUSY;
	err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
	mutex_unlock(&damon_sysfs_lock);
	if (err)
		return err;

	return count;
}

static void damon_sysfs_kdamonds_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_kdamonds, kobj));
}

static struct kobj_attribute damon_sysfs_kdamonds_nr_attr =
		__ATTR_RW_MODE(nr_kdamonds, 0600);

static struct attribute *damon_sysfs_kdamonds_attrs[] = {
	&damon_sysfs_kdamonds_nr_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);

static struct kobj_type damon_sysfs_kdamonds_ktype = {
	.release = damon_sysfs_kdamonds_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_kdamonds_groups,
};

/*
 * damon user interface directory
 */

struct damon_sysfs_ui_dir {
	struct kobject kobj;
	struct damon_sysfs_kdamonds *kdamonds;	/* the 'kdamonds' subdir */
};

static struct damon_sysfs_ui_dir *damon_sysfs_ui_dir_alloc(void)
{
	return kzalloc(sizeof(struct damon_sysfs_ui_dir), GFP_KERNEL);
}

/* Create the 'kdamonds' directory under the given ui directory. */
static int damon_sysfs_ui_dir_add_dirs(struct damon_sysfs_ui_dir *ui_dir)
{
	struct damon_sysfs_kdamonds *kdamonds;
	int err;

	kdamonds = damon_sysfs_kdamonds_alloc();
	if (!kdamonds)
		return -ENOMEM;

	err = kobject_init_and_add(&kdamonds->kobj,
			&damon_sysfs_kdamonds_ktype, &ui_dir->kobj,
			"kdamonds");
	if (err) {
		/* put, not kfree: release callback does the freeing */
		kobject_put(&kdamonds->kobj);
		return err;
	}
	ui_dir->kdamonds = kdamonds;
	return err;
}

static void damon_sysfs_ui_dir_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct damon_sysfs_ui_dir, kobj));
}

static struct attribute *damon_sysfs_ui_dir_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);

static struct kobj_type damon_sysfs_ui_dir_ktype = {
	.release = damon_sysfs_ui_dir_release,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = damon_sysfs_ui_dir_groups,
};

/*
 * Module init: creates /sys/kernel/mm/damon and the 'admin' interface
 * directory hierarchy beneath it.
 */
static int __init damon_sysfs_init(void)
{
	struct kobject *damon_sysfs_root;
	struct damon_sysfs_ui_dir *admin;
	int err;

	damon_sysfs_root = kobject_create_and_add("damon", mm_kobj);
	if (!damon_sysfs_root)
		return -ENOMEM;

	admin = damon_sysfs_ui_dir_alloc();
	if (!admin) {
		kobject_put(damon_sysfs_root);
		return -ENOMEM;
	}
	err = kobject_init_and_add(&admin->kobj, &damon_sysfs_ui_dir_ktype,
			damon_sysfs_root, "admin");
	if (err)
		goto out;
	err = damon_sysfs_ui_dir_add_dirs(admin);
	if (err)
		goto out;
	return 0;

out:
	kobject_put(&admin->kobj);
	kobject_put(damon_sysfs_root);
	return err;
}
subsys_initcall(damon_sysfs_init);