Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: governor: Get rid of governor events

The design of the cpufreq governor API is not very straightforward,
as struct cpufreq_governor provides only one callback to be invoked
from different code paths for different purposes. The purpose it is
invoked for is determined by its second "event" argument, causing it
to act as a "callback multiplexer" of sorts.

Unfortunately, that leads to extra complexity in governors, some of
which implement the ->governor() callback as a switch statement
that simply checks the event argument and invokes a separate function
to handle that specific event.

That extra complexity can be eliminated by replacing the all-purpose
->governor() callback with a family of callbacks to carry out specific
governor operations: initialization and exit, start and stop, and policy
limits updates. That also turns out to reduce the code size, so
do it.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>

+161 -207
+41 -45
arch/powerpc/platforms/cell/cpufreq_spudemand.c
··· 85 85 cancel_delayed_work_sync(&info->work); 86 86 } 87 87 88 - static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event) 88 + static int spu_gov_start(struct cpufreq_policy *policy) 89 89 { 90 90 unsigned int cpu = policy->cpu; 91 - struct spu_gov_info_struct *info, *affected_info; 91 + struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); 92 + struct spu_gov_info_struct *affected_info; 92 93 int i; 93 - int ret = 0; 94 94 95 - info = &per_cpu(spu_gov_info, cpu); 96 - 97 - switch (event) { 98 - case CPUFREQ_GOV_START: 99 - if (!cpu_online(cpu)) { 100 - printk(KERN_ERR "cpu %d is not online\n", cpu); 101 - ret = -EINVAL; 102 - break; 103 - } 104 - 105 - if (!policy->cur) { 106 - printk(KERN_ERR "no cpu specified in policy\n"); 107 - ret = -EINVAL; 108 - break; 109 - } 110 - 111 - /* initialize spu_gov_info for all affected cpus */ 112 - for_each_cpu(i, policy->cpus) { 113 - affected_info = &per_cpu(spu_gov_info, i); 114 - affected_info->policy = policy; 115 - } 116 - 117 - info->poll_int = POLL_TIME; 118 - 119 - /* setup timer */ 120 - spu_gov_init_work(info); 121 - 122 - break; 123 - 124 - case CPUFREQ_GOV_STOP: 125 - /* cancel timer */ 126 - spu_gov_cancel_work(info); 127 - 128 - /* clean spu_gov_info for all affected cpus */ 129 - for_each_cpu (i, policy->cpus) { 130 - info = &per_cpu(spu_gov_info, i); 131 - info->policy = NULL; 132 - } 133 - 134 - break; 95 + if (!cpu_online(cpu)) { 96 + printk(KERN_ERR "cpu %d is not online\n", cpu); 97 + return -EINVAL; 135 98 } 136 99 137 - return ret; 100 + if (!policy->cur) { 101 + printk(KERN_ERR "no cpu specified in policy\n"); 102 + return -EINVAL; 103 + } 104 + 105 + /* initialize spu_gov_info for all affected cpus */ 106 + for_each_cpu(i, policy->cpus) { 107 + affected_info = &per_cpu(spu_gov_info, i); 108 + affected_info->policy = policy; 109 + } 110 + 111 + info->poll_int = POLL_TIME; 112 + 113 + /* setup timer */ 114 + spu_gov_init_work(info); 115 + 116 + return 0; 117 + } 118 + 119 
+ static void spu_gov_stop(struct cpufreq_policy *policy) 120 + { 121 + unsigned int cpu = policy->cpu; 122 + struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); 123 + int i; 124 + 125 + /* cancel timer */ 126 + spu_gov_cancel_work(info); 127 + 128 + /* clean spu_gov_info for all affected cpus */ 129 + for_each_cpu (i, policy->cpus) { 130 + info = &per_cpu(spu_gov_info, i); 131 + info->policy = NULL; 132 + } 138 133 } 139 134 140 135 static struct cpufreq_governor spu_governor = { 141 136 .name = "spudemand", 142 - .governor = spu_gov_govern, 137 + .start = spu_gov_start, 138 + .stop = spu_gov_stop, 143 139 .owner = THIS_MODULE, 144 140 }; 145 141
+20 -11
drivers/cpufreq/cpufreq.c
··· 2023 2023 2024 2024 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2025 2025 2026 - ret = policy->governor->governor(policy, CPUFREQ_GOV_POLICY_INIT); 2027 - if (ret) { 2028 - module_put(policy->governor->owner); 2029 - return ret; 2026 + if (policy->governor->init) { 2027 + ret = policy->governor->init(policy); 2028 + if (ret) { 2029 + module_put(policy->governor->owner); 2030 + return ret; 2031 + } 2030 2032 } 2031 2033 2032 2034 policy->governor->initialized++; ··· 2042 2040 2043 2041 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2044 2042 2045 - policy->governor->governor(policy, CPUFREQ_GOV_POLICY_EXIT); 2043 + if (policy->governor->exit) 2044 + policy->governor->exit(policy); 2046 2045 2047 2046 policy->governor->initialized--; 2048 2047 module_put(policy->governor->owner); ··· 2064 2061 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) 2065 2062 cpufreq_update_current_freq(policy); 2066 2063 2067 - ret = policy->governor->governor(policy, CPUFREQ_GOV_START); 2068 - if (ret) 2069 - return ret; 2064 + if (policy->governor->start) { 2065 + ret = policy->governor->start(policy); 2066 + if (ret) 2067 + return ret; 2068 + } 2070 2069 2071 - policy->governor->governor(policy, CPUFREQ_GOV_LIMITS); 2070 + if (policy->governor->limits) 2071 + policy->governor->limits(policy); 2072 + 2072 2073 return 0; 2073 2074 } 2074 2075 ··· 2083 2076 2084 2077 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2085 2078 2086 - policy->governor->governor(policy, CPUFREQ_GOV_STOP); 2079 + if (policy->governor->stop) 2080 + policy->governor->stop(policy); 2087 2081 } 2088 2082 2089 2083 static void cpufreq_governor_limits(struct cpufreq_policy *policy) ··· 2094 2086 2095 2087 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); 2096 2088 2097 - policy->governor->governor(policy, CPUFREQ_GOV_LIMITS); 2089 + if (policy->governor->limits) 2090 + policy->governor->limits(policy); 2098 2091 } 2099 2092 2100 2093 int cpufreq_register_governor(struct cpufreq_governor 
*governor)
+1 -6
drivers/cpufreq/cpufreq_conservative.c
··· 313 313 } 314 314 315 315 static struct dbs_governor cs_dbs_gov = { 316 - .gov = { 317 - .name = "conservative", 318 - .governor = cpufreq_governor_dbs, 319 - .max_transition_latency = TRANSITION_LATENCY_LIMIT, 320 - .owner = THIS_MODULE, 321 - }, 316 + .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"), 322 317 .kobj_type = { .default_attrs = cs_attributes }, 323 318 .gov_dbs_timer = cs_dbs_timer, 324 319 .alloc = cs_alloc,
+10 -29
drivers/cpufreq/cpufreq_governor.c
··· 389 389 gov->free(policy_dbs); 390 390 } 391 391 392 - static int cpufreq_governor_init(struct cpufreq_policy *policy) 392 + int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) 393 393 { 394 394 struct dbs_governor *gov = dbs_governor_of(policy); 395 395 struct dbs_data *dbs_data; ··· 474 474 mutex_unlock(&gov_dbs_data_mutex); 475 475 return ret; 476 476 } 477 + EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init); 477 478 478 - static int cpufreq_governor_exit(struct cpufreq_policy *policy) 479 + void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy) 479 480 { 480 481 struct dbs_governor *gov = dbs_governor_of(policy); 481 482 struct policy_dbs_info *policy_dbs = policy->governor_data; ··· 501 500 free_policy_dbs_info(policy_dbs, gov); 502 501 503 502 mutex_unlock(&gov_dbs_data_mutex); 504 - return 0; 505 503 } 504 + EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit); 506 505 507 - static int cpufreq_governor_start(struct cpufreq_policy *policy) 506 + int cpufreq_dbs_governor_start(struct cpufreq_policy *policy) 508 507 { 509 508 struct dbs_governor *gov = dbs_governor_of(policy); 510 509 struct policy_dbs_info *policy_dbs = policy->governor_data; ··· 540 539 gov_set_update_util(policy_dbs, sampling_rate); 541 540 return 0; 542 541 } 542 + EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start); 543 543 544 - static int cpufreq_governor_stop(struct cpufreq_policy *policy) 544 + void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy) 545 545 { 546 546 gov_cancel_work(policy); 547 - return 0; 548 547 } 548 + EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop); 549 549 550 - static int cpufreq_governor_limits(struct cpufreq_policy *policy) 550 + void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy) 551 551 { 552 552 struct policy_dbs_info *policy_dbs = policy->governor_data; 553 553 ··· 562 560 gov_update_sample_delay(policy_dbs, 0); 563 561 564 562 mutex_unlock(&policy_dbs->timer_mutex); 565 - 566 - return 0; 567 563 } 568 - 569 - int 
cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event) 570 - { 571 - if (event == CPUFREQ_GOV_POLICY_INIT) { 572 - return cpufreq_governor_init(policy); 573 - } else if (policy->governor_data) { 574 - switch (event) { 575 - case CPUFREQ_GOV_POLICY_EXIT: 576 - return cpufreq_governor_exit(policy); 577 - case CPUFREQ_GOV_START: 578 - return cpufreq_governor_start(policy); 579 - case CPUFREQ_GOV_STOP: 580 - return cpufreq_governor_stop(policy); 581 - case CPUFREQ_GOV_LIMITS: 582 - return cpufreq_governor_limits(policy); 583 - } 584 - } 585 - return -EINVAL; 586 - } 587 - EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); 564 + EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
+19 -1
drivers/cpufreq/cpufreq_governor.h
··· 148 148 return container_of(policy->governor, struct dbs_governor, gov); 149 149 } 150 150 151 + /* Governor callback routines */ 152 + int cpufreq_dbs_governor_init(struct cpufreq_policy *policy); 153 + void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy); 154 + int cpufreq_dbs_governor_start(struct cpufreq_policy *policy); 155 + void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy); 156 + void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); 157 + 158 + #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ 159 + { \ 160 + .name = _name_, \ 161 + .max_transition_latency = TRANSITION_LATENCY_LIMIT, \ 162 + .owner = THIS_MODULE, \ 163 + .init = cpufreq_dbs_governor_init, \ 164 + .exit = cpufreq_dbs_governor_exit, \ 165 + .start = cpufreq_dbs_governor_start, \ 166 + .stop = cpufreq_dbs_governor_stop, \ 167 + .limits = cpufreq_dbs_governor_limits, \ 168 + } 169 + 151 170 /* Governor specific operations */ 152 171 struct od_ops { 153 172 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, ··· 174 155 }; 175 156 176 157 unsigned int dbs_update(struct cpufreq_policy *policy); 177 - int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event); 178 158 void od_register_powersave_bias_handler(unsigned int (*f) 179 159 (struct cpufreq_policy *, unsigned int, unsigned int), 180 160 unsigned int powersave_bias);
+1 -6
drivers/cpufreq/cpufreq_ondemand.c
··· 420 420 }; 421 421 422 422 static struct dbs_governor od_dbs_gov = { 423 - .gov = { 424 - .name = "ondemand", 425 - .governor = cpufreq_governor_dbs, 426 - .max_transition_latency = TRANSITION_LATENCY_LIMIT, 427 - .owner = THIS_MODULE, 428 - }, 423 + .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"), 429 424 .kobj_type = { .default_attrs = od_attributes }, 430 425 .gov_dbs_timer = od_dbs_timer, 431 426 .alloc = od_alloc,
+4 -13
drivers/cpufreq/cpufreq_performance.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/module.h> 18 18 19 - static int cpufreq_governor_performance(struct cpufreq_policy *policy, 20 - unsigned int event) 19 + static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy) 21 20 { 22 - switch (event) { 23 - case CPUFREQ_GOV_LIMITS: 24 - pr_debug("setting to %u kHz\n", policy->max); 25 - __cpufreq_driver_target(policy, policy->max, 26 - CPUFREQ_RELATION_H); 27 - break; 28 - default: 29 - break; 30 - } 31 - return 0; 21 + pr_debug("setting to %u kHz\n", policy->max); 22 + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); 32 23 } 33 24 34 25 static struct cpufreq_governor cpufreq_gov_performance = { 35 26 .name = "performance", 36 - .governor = cpufreq_governor_performance, 37 27 .owner = THIS_MODULE, 28 + .limits = cpufreq_gov_performance_limits, 38 29 }; 39 30 40 31 static int __init cpufreq_gov_performance_init(void)
+4 -13
drivers/cpufreq/cpufreq_powersave.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/module.h> 18 18 19 - static int cpufreq_governor_powersave(struct cpufreq_policy *policy, 20 - unsigned int event) 19 + static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy) 21 20 { 22 - switch (event) { 23 - case CPUFREQ_GOV_LIMITS: 24 - pr_debug("setting to %u kHz\n", policy->min); 25 - __cpufreq_driver_target(policy, policy->min, 26 - CPUFREQ_RELATION_L); 27 - break; 28 - default: 29 - break; 30 - } 31 - return 0; 21 + pr_debug("setting to %u kHz\n", policy->min); 22 + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); 32 23 } 33 24 34 25 static struct cpufreq_governor cpufreq_gov_powersave = { 35 26 .name = "powersave", 36 - .governor = cpufreq_governor_powersave, 27 + .limits = cpufreq_gov_powersave_limits, 37 28 .owner = THIS_MODULE, 38 29 }; 39 30
+48 -48
drivers/cpufreq/cpufreq_userspace.c
··· 65 65 return 0; 66 66 } 67 67 68 - static int cpufreq_governor_userspace(struct cpufreq_policy *policy, 69 - unsigned int event) 68 + static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy) 69 + { 70 + mutex_lock(&userspace_mutex); 71 + kfree(policy->governor_data); 72 + policy->governor_data = NULL; 73 + mutex_unlock(&userspace_mutex); 74 + } 75 + 76 + static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy) 70 77 { 71 78 unsigned int *setspeed = policy->governor_data; 72 - unsigned int cpu = policy->cpu; 73 - int rc = 0; 74 79 75 - if (event == CPUFREQ_GOV_POLICY_INIT) 76 - return cpufreq_userspace_policy_init(policy); 80 + BUG_ON(!policy->cur); 81 + pr_debug("started managing cpu %u\n", policy->cpu); 77 82 78 - if (!setspeed) 79 - return -EINVAL; 83 + mutex_lock(&userspace_mutex); 84 + per_cpu(cpu_is_managed, policy->cpu) = 1; 85 + *setspeed = policy->cur; 86 + mutex_unlock(&userspace_mutex); 87 + return 0; 88 + } 80 89 81 - switch (event) { 82 - case CPUFREQ_GOV_POLICY_EXIT: 83 - mutex_lock(&userspace_mutex); 84 - policy->governor_data = NULL; 85 - kfree(setspeed); 86 - mutex_unlock(&userspace_mutex); 87 - break; 88 - case CPUFREQ_GOV_START: 89 - BUG_ON(!policy->cur); 90 - pr_debug("started managing cpu %u\n", cpu); 90 + static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy) 91 + { 92 + unsigned int *setspeed = policy->governor_data; 91 93 92 - mutex_lock(&userspace_mutex); 93 - per_cpu(cpu_is_managed, cpu) = 1; 94 - *setspeed = policy->cur; 95 - mutex_unlock(&userspace_mutex); 96 - break; 97 - case CPUFREQ_GOV_STOP: 98 - pr_debug("managing cpu %u stopped\n", cpu); 94 + pr_debug("managing cpu %u stopped\n", policy->cpu); 99 95 100 - mutex_lock(&userspace_mutex); 101 - per_cpu(cpu_is_managed, cpu) = 0; 102 - *setspeed = 0; 103 - mutex_unlock(&userspace_mutex); 104 - break; 105 - case CPUFREQ_GOV_LIMITS: 106 - mutex_lock(&userspace_mutex); 107 - pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, 
last set to %u kHz\n", 108 - cpu, policy->min, policy->max, policy->cur, *setspeed); 96 + mutex_lock(&userspace_mutex); 97 + per_cpu(cpu_is_managed, policy->cpu) = 0; 98 + *setspeed = 0; 99 + mutex_unlock(&userspace_mutex); 100 + } 109 101 110 - if (policy->max < *setspeed) 111 - __cpufreq_driver_target(policy, policy->max, 112 - CPUFREQ_RELATION_H); 113 - else if (policy->min > *setspeed) 114 - __cpufreq_driver_target(policy, policy->min, 115 - CPUFREQ_RELATION_L); 116 - else 117 - __cpufreq_driver_target(policy, *setspeed, 118 - CPUFREQ_RELATION_L); 119 - mutex_unlock(&userspace_mutex); 120 - break; 121 - } 122 - return rc; 102 + static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy) 103 + { 104 + unsigned int *setspeed = policy->governor_data; 105 + 106 + mutex_lock(&userspace_mutex); 107 + 108 + pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", 109 + policy->cpu, policy->min, policy->max, policy->cur, *setspeed); 110 + 111 + if (policy->max < *setspeed) 112 + __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); 113 + else if (policy->min > *setspeed) 114 + __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); 115 + else 116 + __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L); 117 + 118 + mutex_unlock(&userspace_mutex); 123 119 } 124 120 125 121 static struct cpufreq_governor cpufreq_gov_userspace = { 126 122 .name = "userspace", 127 - .governor = cpufreq_governor_userspace, 123 + .init = cpufreq_userspace_policy_init, 124 + .exit = cpufreq_userspace_policy_exit, 125 + .start = cpufreq_userspace_policy_start, 126 + .stop = cpufreq_userspace_policy_stop, 127 + .limits = cpufreq_userspace_policy_limits, 128 128 .store_setspeed = cpufreq_set, 129 129 .show_setspeed = show_speed, 130 130 .owner = THIS_MODULE,
+5 -9
include/linux/cpufreq.h
··· 455 455 #define MIN_LATENCY_MULTIPLIER (20) 456 456 #define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 457 457 458 - /* Governor Events */ 459 - #define CPUFREQ_GOV_START 1 460 - #define CPUFREQ_GOV_STOP 2 461 - #define CPUFREQ_GOV_LIMITS 3 462 - #define CPUFREQ_GOV_POLICY_INIT 4 463 - #define CPUFREQ_GOV_POLICY_EXIT 5 464 - 465 458 struct cpufreq_governor { 466 459 char name[CPUFREQ_NAME_LEN]; 467 460 int initialized; 468 - int (*governor) (struct cpufreq_policy *policy, 469 - unsigned int event); 461 + int (*init)(struct cpufreq_policy *policy); 462 + void (*exit)(struct cpufreq_policy *policy); 463 + int (*start)(struct cpufreq_policy *policy); 464 + void (*stop)(struct cpufreq_policy *policy); 465 + void (*limits)(struct cpufreq_policy *policy); 470 466 ssize_t (*show_setspeed) (struct cpufreq_policy *policy, 471 467 char *buf); 472 468 int (*store_setspeed) (struct cpufreq_policy *policy,
+8 -26
kernel/sched/cpufreq_schedutil.c
··· 394 394 return ret; 395 395 } 396 396 397 - static int sugov_exit(struct cpufreq_policy *policy) 397 + static void sugov_exit(struct cpufreq_policy *policy) 398 398 { 399 399 struct sugov_policy *sg_policy = policy->governor_data; 400 400 struct sugov_tunables *tunables = sg_policy->tunables; ··· 412 412 mutex_unlock(&global_tunables_lock); 413 413 414 414 sugov_policy_free(sg_policy); 415 - return 0; 416 415 } 417 416 418 417 static int sugov_start(struct cpufreq_policy *policy) ··· 443 444 return 0; 444 445 } 445 446 446 - static int sugov_stop(struct cpufreq_policy *policy) 447 + static void sugov_stop(struct cpufreq_policy *policy) 447 448 { 448 449 struct sugov_policy *sg_policy = policy->governor_data; 449 450 unsigned int cpu; ··· 455 456 456 457 irq_work_sync(&sg_policy->irq_work); 457 458 cancel_work_sync(&sg_policy->work); 458 - return 0; 459 459 } 460 460 461 - static int sugov_limits(struct cpufreq_policy *policy) 461 + static void sugov_limits(struct cpufreq_policy *policy) 462 462 { 463 463 struct sugov_policy *sg_policy = policy->governor_data; 464 464 ··· 475 477 } 476 478 477 479 sg_policy->need_freq_update = true; 478 - return 0; 479 - } 480 - 481 - int sugov_governor(struct cpufreq_policy *policy, unsigned int event) 482 - { 483 - if (event == CPUFREQ_GOV_POLICY_INIT) { 484 - return sugov_init(policy); 485 - } else if (policy->governor_data) { 486 - switch (event) { 487 - case CPUFREQ_GOV_POLICY_EXIT: 488 - return sugov_exit(policy); 489 - case CPUFREQ_GOV_START: 490 - return sugov_start(policy); 491 - case CPUFREQ_GOV_STOP: 492 - return sugov_stop(policy); 493 - case CPUFREQ_GOV_LIMITS: 494 - return sugov_limits(policy); 495 - } 496 - } 497 - return -EINVAL; 498 480 } 499 481 500 482 static struct cpufreq_governor schedutil_gov = { 501 483 .name = "schedutil", 502 - .governor = sugov_governor, 503 484 .owner = THIS_MODULE, 485 + .init = sugov_init, 486 + .exit = sugov_exit, 487 + .start = sugov_start, 488 + .stop = sugov_stop, 489 + .limits 
= sugov_limits, 504 490 }; 505 491 506 492 static int __init sugov_module_init(void)