Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'rust/cpufreq-dt' into cpufreq/arm/linux-next

+3829 -302
+11
MAINTAINERS
··· 5883 5883 F: include/linux/clk-pr* 5884 5884 F: include/linux/clk/ 5885 5885 F: include/linux/of_clk.h 5886 + F: rust/helpers/clk.c 5887 + F: rust/kernel/clk.rs 5886 5888 X: drivers/clk/clkdev.c 5887 5889 5888 5890 COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) ··· 6142 6140 F: include/linux/cpufreq.h 6143 6141 F: include/linux/sched/cpufreq.h 6144 6142 F: kernel/sched/cpufreq*.c 6143 + F: rust/kernel/cpufreq.rs 6145 6144 F: tools/testing/selftests/cpufreq/ 6146 6145 6147 6146 CPU HOTPLUG ··· 6156 6153 F: include/linux/smpboot.h 6157 6154 F: kernel/cpu.c 6158 6155 F: kernel/smpboot.* 6156 + F: rust/kernel/cpu.rs 6159 6157 6160 6158 CPU IDLE TIME MANAGEMENT FRAMEWORK 6161 6159 M: "Rafael J. Wysocki" <rafael@kernel.org> ··· 6240 6236 L: linux-riscv@lists.infradead.org 6241 6237 S: Maintained 6242 6238 F: drivers/cpuidle/cpuidle-riscv-sbi.c 6239 + 6240 + CPUMASK API [RUST] 6241 + M: Viresh Kumar <viresh.kumar@linaro.org> 6242 + R: Yury Norov <yury.norov@gmail.com> 6243 + S: Maintained 6244 + F: rust/kernel/cpumask.rs 6243 6245 6244 6246 CRAMFS FILESYSTEM 6245 6247 M: Nicolas Pitre <nico@fluxnic.net> ··· 18166 18156 F: Documentation/power/opp.rst 18167 18157 F: drivers/opp/ 18168 18158 F: include/linux/pm_opp.h 18159 + F: rust/kernel/opp.rs 18169 18160 18170 18161 OPL4 DRIVER 18171 18162 M: Clemens Ladisch <clemens@ladisch.de>
+12
drivers/cpufreq/Kconfig
··· 217 217 218 218 If in doubt, say N. 219 219 220 + config CPUFREQ_DT_RUST 221 + tristate "Rust based Generic DT based cpufreq driver" 222 + depends on HAVE_CLK && OF && RUST 223 + select CPUFREQ_DT_PLATDEV 224 + select PM_OPP 225 + help 226 + This adds a Rust based generic DT based cpufreq driver for frequency 227 + management. It supports both uniprocessor (UP) and symmetric 228 + multiprocessor (SMP) systems. 229 + 230 + If in doubt, say N. 231 + 220 232 config CPUFREQ_VIRT 221 233 tristate "Virtual cpufreq driver" 222 234 depends on GENERIC_ARCH_TOPOLOGY
+1
drivers/cpufreq/Makefile
··· 15 15 obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o 16 16 17 17 obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o 18 + obj-$(CONFIG_CPUFREQ_DT_RUST) += rcpufreq_dt.o 18 19 obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o 19 20 obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o 20 21
+2 -5
drivers/cpufreq/amd-pstate.c
··· 821 821 schedule_work(&sched_prefcore_work); 822 822 } 823 823 824 - static void amd_pstate_update_limits(unsigned int cpu) 824 + static void amd_pstate_update_limits(struct cpufreq_policy *policy) 825 825 { 826 - struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); 827 826 struct amd_cpudata *cpudata; 828 827 u32 prev_high = 0, cur_high = 0; 829 828 bool highest_perf_changed = false; 829 + unsigned int cpu = policy->cpu; 830 830 831 831 if (!amd_pstate_prefcore) 832 - return; 833 - 834 - if (!policy) 835 832 return; 836 833 837 834 if (amd_get_highest_perf(cpu, &cur_high))
+152 -199
drivers/cpufreq/cpufreq.c
··· 255 255 } 256 256 EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 257 257 258 - /** 259 - * cpufreq_cpu_release - Unlock a policy and decrement its usage counter. 260 - * @policy: cpufreq policy returned by cpufreq_cpu_acquire(). 261 - */ 262 - void cpufreq_cpu_release(struct cpufreq_policy *policy) 263 - { 264 - if (WARN_ON(!policy)) 265 - return; 266 - 267 - lockdep_assert_held(&policy->rwsem); 268 - 269 - up_write(&policy->rwsem); 270 - 271 - cpufreq_cpu_put(policy); 272 - } 273 - 274 - /** 275 - * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it. 276 - * @cpu: CPU to find the policy for. 277 - * 278 - * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and 279 - * if the policy returned by it is not NULL, acquire its rwsem for writing. 280 - * Return the policy if it is active or release it and return NULL otherwise. 281 - * 282 - * The policy returned by this function has to be released with the help of 283 - * cpufreq_cpu_release() in order to release its rwsem and balance its usage 284 - * counter properly. 
285 - */ 286 - struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu) 287 - { 288 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 289 - 290 - if (!policy) 291 - return NULL; 292 - 293 - down_write(&policy->rwsem); 294 - 295 - if (policy_is_inactive(policy)) { 296 - cpufreq_cpu_release(policy); 297 - return NULL; 298 - } 299 - 300 - return policy; 301 - } 302 - 303 258 /********************************************************************* 304 259 * EXTERNALLY AFFECTING FREQUENCY CHANGES * 305 260 *********************************************************************/ ··· 964 1009 { 965 1010 struct cpufreq_policy *policy = to_policy(kobj); 966 1011 struct freq_attr *fattr = to_attr(attr); 967 - ssize_t ret = -EBUSY; 968 1012 969 1013 if (!fattr->show) 970 1014 return -EIO; 971 1015 972 - down_read(&policy->rwsem); 973 - if (likely(!policy_is_inactive(policy))) 974 - ret = fattr->show(policy, buf); 975 - up_read(&policy->rwsem); 1016 + guard(cpufreq_policy_read)(policy); 976 1017 977 - return ret; 1018 + if (likely(!policy_is_inactive(policy))) 1019 + return fattr->show(policy, buf); 1020 + 1021 + return -EBUSY; 978 1022 } 979 1023 980 1024 static ssize_t store(struct kobject *kobj, struct attribute *attr, ··· 981 1027 { 982 1028 struct cpufreq_policy *policy = to_policy(kobj); 983 1029 struct freq_attr *fattr = to_attr(attr); 984 - ssize_t ret = -EBUSY; 985 1030 986 1031 if (!fattr->store) 987 1032 return -EIO; 988 1033 989 - down_write(&policy->rwsem); 990 - if (likely(!policy_is_inactive(policy))) 991 - ret = fattr->store(policy, buf, count); 992 - up_write(&policy->rwsem); 1034 + guard(cpufreq_policy_write)(policy); 993 1035 994 - return ret; 1036 + if (likely(!policy_is_inactive(policy))) 1037 + return fattr->store(policy, buf, count); 1038 + 1039 + return -EBUSY; 995 1040 } 996 1041 997 1042 static void cpufreq_sysfs_release(struct kobject *kobj) ··· 1148 1195 if (cpumask_test_cpu(cpu, policy->cpus)) 1149 1196 return 0; 1150 1197 1151 - 
down_write(&policy->rwsem); 1198 + guard(cpufreq_policy_write)(policy); 1199 + 1152 1200 if (has_target()) 1153 1201 cpufreq_stop_governor(policy); 1154 1202 ··· 1160 1206 if (ret) 1161 1207 pr_err("%s: Failed to start governor\n", __func__); 1162 1208 } 1163 - up_write(&policy->rwsem); 1209 + 1164 1210 return ret; 1165 1211 } 1166 1212 ··· 1180 1226 container_of(work, struct cpufreq_policy, update); 1181 1227 1182 1228 pr_debug("handle_update for cpu %u called\n", policy->cpu); 1183 - down_write(&policy->rwsem); 1229 + 1230 + guard(cpufreq_policy_write)(policy); 1231 + 1184 1232 refresh_frequency_limits(policy); 1185 - up_write(&policy->rwsem); 1186 1233 } 1187 1234 1188 1235 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq, ··· 1209 1254 struct kobject *kobj; 1210 1255 struct completion *cmp; 1211 1256 1212 - down_write(&policy->rwsem); 1213 - cpufreq_stats_free_table(policy); 1214 - kobj = &policy->kobj; 1215 - cmp = &policy->kobj_unregister; 1216 - up_write(&policy->rwsem); 1257 + scoped_guard(cpufreq_policy_write, policy) { 1258 + cpufreq_stats_free_table(policy); 1259 + kobj = &policy->kobj; 1260 + cmp = &policy->kobj_unregister; 1261 + } 1217 1262 kobject_put(kobj); 1218 1263 1219 1264 /* ··· 1289 1334 init_waitqueue_head(&policy->transition_wait); 1290 1335 INIT_WORK(&policy->update, handle_update); 1291 1336 1292 - policy->cpu = cpu; 1293 1337 return policy; 1294 1338 1295 1339 err_min_qos_notifier: ··· 1357 1403 kfree(policy); 1358 1404 } 1359 1405 1360 - static int cpufreq_online(unsigned int cpu) 1406 + static int cpufreq_policy_online(struct cpufreq_policy *policy, 1407 + unsigned int cpu, bool new_policy) 1361 1408 { 1362 - struct cpufreq_policy *policy; 1363 - bool new_policy; 1364 1409 unsigned long flags; 1365 1410 unsigned int j; 1366 1411 int ret; 1367 1412 1368 - pr_debug("%s: bringing CPU%u online\n", __func__, cpu); 1413 + guard(cpufreq_policy_write)(policy); 1369 1414 1370 - /* Check if this CPU already has a 
policy to manage it */ 1371 - policy = per_cpu(cpufreq_cpu_data, cpu); 1372 - if (policy) { 1373 - WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 1374 - if (!policy_is_inactive(policy)) 1375 - return cpufreq_add_policy_cpu(policy, cpu); 1376 - 1377 - /* This is the only online CPU for the policy. Start over. */ 1378 - new_policy = false; 1379 - down_write(&policy->rwsem); 1380 - policy->cpu = cpu; 1381 - policy->governor = NULL; 1382 - } else { 1383 - new_policy = true; 1384 - policy = cpufreq_policy_alloc(cpu); 1385 - if (!policy) 1386 - return -ENOMEM; 1387 - down_write(&policy->rwsem); 1388 - } 1415 + policy->cpu = cpu; 1416 + policy->governor = NULL; 1389 1417 1390 1418 if (!new_policy && cpufreq_driver->online) { 1391 1419 /* Recover policy->cpus using related_cpus */ ··· 1390 1454 if (ret) { 1391 1455 pr_debug("%s: %d: initialization failed\n", __func__, 1392 1456 __LINE__); 1393 - goto out_free_policy; 1457 + goto out_clear_policy; 1394 1458 } 1395 1459 1396 1460 /* ··· 1541 1605 goto out_destroy_policy; 1542 1606 } 1543 1607 1544 - up_write(&policy->rwsem); 1608 + return 0; 1609 + 1610 + out_destroy_policy: 1611 + for_each_cpu(j, policy->real_cpus) 1612 + remove_cpu_dev_symlink(policy, j, get_cpu_device(j)); 1613 + 1614 + out_offline_policy: 1615 + if (cpufreq_driver->offline) 1616 + cpufreq_driver->offline(policy); 1617 + 1618 + out_exit_policy: 1619 + if (cpufreq_driver->exit) 1620 + cpufreq_driver->exit(policy); 1621 + 1622 + out_clear_policy: 1623 + cpumask_clear(policy->cpus); 1624 + 1625 + return ret; 1626 + } 1627 + 1628 + static int cpufreq_online(unsigned int cpu) 1629 + { 1630 + struct cpufreq_policy *policy; 1631 + bool new_policy; 1632 + int ret; 1633 + 1634 + pr_debug("%s: bringing CPU%u online\n", __func__, cpu); 1635 + 1636 + /* Check if this CPU already has a policy to manage it */ 1637 + policy = per_cpu(cpufreq_cpu_data, cpu); 1638 + if (policy) { 1639 + WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); 1640 + if 
(!policy_is_inactive(policy)) 1641 + return cpufreq_add_policy_cpu(policy, cpu); 1642 + 1643 + /* This is the only online CPU for the policy. Start over. */ 1644 + new_policy = false; 1645 + } else { 1646 + new_policy = true; 1647 + policy = cpufreq_policy_alloc(cpu); 1648 + if (!policy) 1649 + return -ENOMEM; 1650 + } 1651 + 1652 + ret = cpufreq_policy_online(policy, cpu, new_policy); 1653 + if (ret) { 1654 + cpufreq_policy_free(policy); 1655 + return ret; 1656 + } 1545 1657 1546 1658 kobject_uevent(&policy->kobj, KOBJ_ADD); 1547 1659 ··· 1617 1633 pr_debug("initialization complete\n"); 1618 1634 1619 1635 return 0; 1620 - 1621 - out_destroy_policy: 1622 - for_each_cpu(j, policy->real_cpus) 1623 - remove_cpu_dev_symlink(policy, j, get_cpu_device(j)); 1624 - 1625 - out_offline_policy: 1626 - if (cpufreq_driver->offline) 1627 - cpufreq_driver->offline(policy); 1628 - 1629 - out_exit_policy: 1630 - if (cpufreq_driver->exit) 1631 - cpufreq_driver->exit(policy); 1632 - 1633 - out_free_policy: 1634 - cpumask_clear(policy->cpus); 1635 - up_write(&policy->rwsem); 1636 - 1637 - cpufreq_policy_free(policy); 1638 - return ret; 1639 1636 } 1640 1637 1641 1638 /** ··· 1706 1741 return 0; 1707 1742 } 1708 1743 1709 - down_write(&policy->rwsem); 1744 + guard(cpufreq_policy_write)(policy); 1710 1745 1711 1746 __cpufreq_offline(cpu, policy); 1712 1747 1713 - up_write(&policy->rwsem); 1714 1748 return 0; 1715 1749 } 1716 1750 ··· 1726 1762 if (!policy) 1727 1763 return; 1728 1764 1729 - down_write(&policy->rwsem); 1765 + scoped_guard(cpufreq_policy_write, policy) { 1766 + if (cpu_online(cpu)) 1767 + __cpufreq_offline(cpu, policy); 1730 1768 1731 - if (cpu_online(cpu)) 1732 - __cpufreq_offline(cpu, policy); 1769 + remove_cpu_dev_symlink(policy, cpu, dev); 1733 1770 1734 - remove_cpu_dev_symlink(policy, cpu, dev); 1771 + if (!cpumask_empty(policy->real_cpus)) 1772 + return; 1735 1773 1736 - if (!cpumask_empty(policy->real_cpus)) { 1737 - up_write(&policy->rwsem); 1738 - return; 1774 
+ /* 1775 + * Unregister cpufreq cooling once all the CPUs of the policy 1776 + * are removed. 1777 + */ 1778 + if (cpufreq_thermal_control_enabled(cpufreq_driver)) { 1779 + cpufreq_cooling_unregister(policy->cdev); 1780 + policy->cdev = NULL; 1781 + } 1782 + 1783 + /* We did light-weight exit earlier, do full tear down now */ 1784 + if (cpufreq_driver->offline && cpufreq_driver->exit) 1785 + cpufreq_driver->exit(policy); 1739 1786 } 1740 - 1741 - /* 1742 - * Unregister cpufreq cooling once all the CPUs of the policy are 1743 - * removed. 1744 - */ 1745 - if (cpufreq_thermal_control_enabled(cpufreq_driver)) { 1746 - cpufreq_cooling_unregister(policy->cdev); 1747 - policy->cdev = NULL; 1748 - } 1749 - 1750 - /* We did light-weight exit earlier, do full tear down now */ 1751 - if (cpufreq_driver->offline && cpufreq_driver->exit) 1752 - cpufreq_driver->exit(policy); 1753 - 1754 - up_write(&policy->rwsem); 1755 1787 1756 1788 cpufreq_policy_free(policy); 1757 1789 } ··· 1818 1858 */ 1819 1859 unsigned int cpufreq_quick_get(unsigned int cpu) 1820 1860 { 1821 - struct cpufreq_policy *policy; 1822 - unsigned int ret_freq = 0; 1861 + struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL; 1823 1862 unsigned long flags; 1824 1863 1825 1864 read_lock_irqsave(&cpufreq_driver_lock, flags); 1826 1865 1827 1866 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { 1828 - ret_freq = cpufreq_driver->get(cpu); 1867 + unsigned int ret_freq = cpufreq_driver->get(cpu); 1868 + 1829 1869 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1870 + 1830 1871 return ret_freq; 1831 1872 } 1832 1873 1833 1874 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1834 1875 1835 1876 policy = cpufreq_cpu_get(cpu); 1836 - if (policy) { 1837 - ret_freq = policy->cur; 1838 - cpufreq_cpu_put(policy); 1839 - } 1877 + if (policy) 1878 + return policy->cur; 1840 1879 1841 - return ret_freq; 1880 + return 0; 1842 1881 } 1843 1882 EXPORT_SYMBOL(cpufreq_quick_get); 1844 1883 
··· 1849 1890 */ 1850 1891 unsigned int cpufreq_quick_get_max(unsigned int cpu) 1851 1892 { 1852 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1853 - unsigned int ret_freq = 0; 1893 + struct cpufreq_policy *policy __free(put_cpufreq_policy); 1854 1894 1855 - if (policy) { 1856 - ret_freq = policy->max; 1857 - cpufreq_cpu_put(policy); 1858 - } 1895 + policy = cpufreq_cpu_get(cpu); 1896 + if (policy) 1897 + return policy->max; 1859 1898 1860 - return ret_freq; 1899 + return 0; 1861 1900 } 1862 1901 EXPORT_SYMBOL(cpufreq_quick_get_max); 1863 1902 ··· 1867 1910 */ 1868 1911 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) 1869 1912 { 1870 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1871 - unsigned int ret_freq = 0; 1913 + struct cpufreq_policy *policy __free(put_cpufreq_policy); 1872 1914 1873 - if (policy) { 1874 - ret_freq = policy->cpuinfo.max_freq; 1875 - cpufreq_cpu_put(policy); 1876 - } 1915 + policy = cpufreq_cpu_get(cpu); 1916 + if (policy) 1917 + return policy->cpuinfo.max_freq; 1877 1918 1878 - return ret_freq; 1919 + return 0; 1879 1920 } 1880 1921 EXPORT_SYMBOL(cpufreq_get_hw_max_freq); 1881 1922 ··· 1893 1938 */ 1894 1939 unsigned int cpufreq_get(unsigned int cpu) 1895 1940 { 1896 - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1897 - unsigned int ret_freq = 0; 1941 + struct cpufreq_policy *policy __free(put_cpufreq_policy); 1898 1942 1899 - if (policy) { 1900 - down_read(&policy->rwsem); 1901 - if (cpufreq_driver->get) 1902 - ret_freq = __cpufreq_get(policy); 1903 - up_read(&policy->rwsem); 1943 + policy = cpufreq_cpu_get(cpu); 1944 + if (!policy) 1945 + return 0; 1904 1946 1905 - cpufreq_cpu_put(policy); 1906 - } 1947 + guard(cpufreq_policy_read)(policy); 1907 1948 1908 - return ret_freq; 1949 + if (cpufreq_driver->get) 1950 + return __cpufreq_get(policy); 1951 + 1952 + return 0; 1909 1953 } 1910 1954 EXPORT_SYMBOL(cpufreq_get); 1911 1955 ··· 1963 2009 1964 2010 for_each_active_policy(policy) { 1965 2011 if 
(has_target()) { 1966 - down_write(&policy->rwsem); 1967 - cpufreq_stop_governor(policy); 1968 - up_write(&policy->rwsem); 2012 + scoped_guard(cpufreq_policy_write, policy) { 2013 + cpufreq_stop_governor(policy); 2014 + } 1969 2015 } 1970 2016 1971 2017 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) ··· 2006 2052 pr_err("%s: Failed to resume driver: %s\n", __func__, 2007 2053 cpufreq_driver->name); 2008 2054 } else if (has_target()) { 2009 - down_write(&policy->rwsem); 2010 - ret = cpufreq_start_governor(policy); 2011 - up_write(&policy->rwsem); 2055 + scoped_guard(cpufreq_policy_write, policy) { 2056 + ret = cpufreq_start_governor(policy); 2057 + } 2012 2058 2013 2059 if (ret) 2014 2060 pr_err("%s: Failed to start governor for CPU%u's policy\n", ··· 2375 2421 unsigned int target_freq, 2376 2422 unsigned int relation) 2377 2423 { 2378 - int ret; 2424 + guard(cpufreq_policy_write)(policy); 2379 2425 2380 - down_write(&policy->rwsem); 2381 - 2382 - ret = __cpufreq_driver_target(policy, target_freq, relation); 2383 - 2384 - up_write(&policy->rwsem); 2385 - 2386 - return ret; 2426 + return __cpufreq_driver_target(policy, target_freq, relation); 2387 2427 } 2388 2428 EXPORT_SYMBOL_GPL(cpufreq_driver_target); 2389 2429 ··· 2559 2611 */ 2560 2612 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 2561 2613 { 2562 - struct cpufreq_policy *cpu_policy; 2614 + struct cpufreq_policy *cpu_policy __free(put_cpufreq_policy); 2615 + 2563 2616 if (!policy) 2564 2617 return -EINVAL; 2565 2618 ··· 2570 2621 2571 2622 memcpy(policy, cpu_policy, sizeof(*policy)); 2572 2623 2573 - cpufreq_cpu_put(cpu_policy); 2574 2624 return 0; 2575 2625 } 2576 2626 EXPORT_SYMBOL(cpufreq_get_policy); ··· 2717 2769 return ret; 2718 2770 } 2719 2771 2772 + static void cpufreq_policy_refresh(struct cpufreq_policy *policy) 2773 + { 2774 + guard(cpufreq_policy_write)(policy); 2775 + 2776 + /* 2777 + * BIOS might change freq behind our back 2778 + * -> ask driver for 
current freq and notify governors about a change 2779 + */ 2780 + if (cpufreq_driver->get && has_target() && 2781 + (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) 2782 + return; 2783 + 2784 + refresh_frequency_limits(policy); 2785 + } 2786 + 2720 2787 /** 2721 2788 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. 2722 2789 * @cpu: CPU to re-evaluate the policy for. ··· 2743 2780 */ 2744 2781 void cpufreq_update_policy(unsigned int cpu) 2745 2782 { 2746 - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 2783 + struct cpufreq_policy *policy __free(put_cpufreq_policy); 2747 2784 2785 + policy = cpufreq_cpu_get(cpu); 2748 2786 if (!policy) 2749 2787 return; 2750 2788 2751 - /* 2752 - * BIOS might change freq behind our back 2753 - * -> ask driver for current freq and notify governors about a change 2754 - */ 2755 - if (cpufreq_driver->get && has_target() && 2756 - (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) 2757 - goto unlock; 2758 - 2759 - refresh_frequency_limits(policy); 2760 - 2761 - unlock: 2762 - cpufreq_cpu_release(policy); 2789 + cpufreq_policy_refresh(policy); 2763 2790 } 2764 2791 EXPORT_SYMBOL(cpufreq_update_policy); 2765 2792 ··· 2758 2805 * @cpu: CPU to update the policy limits for. 2759 2806 * 2760 2807 * Invoke the driver's ->update_limits callback if present or call 2761 - * cpufreq_update_policy() for @cpu. 2808 + * cpufreq_policy_refresh() for @cpu. 2762 2809 */ 2763 2810 void cpufreq_update_limits(unsigned int cpu) 2764 2811 { ··· 2769 2816 return; 2770 2817 2771 2818 if (cpufreq_driver->update_limits) 2772 - cpufreq_driver->update_limits(cpu); 2819 + cpufreq_driver->update_limits(policy); 2773 2820 else 2774 - cpufreq_update_policy(cpu); 2821 + cpufreq_policy_refresh(policy); 2775 2822 } 2776 2823 EXPORT_SYMBOL_GPL(cpufreq_update_limits); 2777 2824
+18 -29
drivers/cpufreq/intel_pstate.c
··· 1353 1353 cpufreq_update_policy(cpu); 1354 1354 } 1355 1355 1356 - static void __intel_pstate_update_max_freq(struct cpudata *cpudata, 1357 - struct cpufreq_policy *policy) 1356 + static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy, 1357 + struct cpudata *cpudata) 1358 1358 { 1359 + guard(cpufreq_policy_write)(policy); 1360 + 1359 1361 if (hwp_active) 1360 1362 intel_pstate_get_hwp_cap(cpudata); 1361 1363 ··· 1367 1365 refresh_frequency_limits(policy); 1368 1366 } 1369 1367 1370 - static void intel_pstate_update_limits(unsigned int cpu) 1368 + static bool intel_pstate_update_max_freq(struct cpudata *cpudata) 1371 1369 { 1372 - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 1373 - struct cpudata *cpudata; 1370 + struct cpufreq_policy *policy __free(put_cpufreq_policy); 1374 1371 1372 + policy = cpufreq_cpu_get(cpudata->cpu); 1375 1373 if (!policy) 1376 - return; 1374 + return false; 1377 1375 1378 - cpudata = all_cpu_data[cpu]; 1376 + __intel_pstate_update_max_freq(policy, cpudata); 1379 1377 1380 - __intel_pstate_update_max_freq(cpudata, policy); 1378 + return true; 1379 + } 1381 1380 1382 - /* Prevent the driver from being unregistered now. 
*/ 1383 - mutex_lock(&intel_pstate_driver_lock); 1381 + static void intel_pstate_update_limits(struct cpufreq_policy *policy) 1382 + { 1383 + struct cpudata *cpudata = all_cpu_data[policy->cpu]; 1384 1384 1385 - cpufreq_cpu_release(policy); 1385 + __intel_pstate_update_max_freq(policy, cpudata); 1386 1386 1387 1387 hybrid_update_capacity(cpudata); 1388 - 1389 - mutex_unlock(&intel_pstate_driver_lock); 1390 1388 } 1391 1389 1392 1390 static void intel_pstate_update_limits_for_all(void) 1393 1391 { 1394 1392 int cpu; 1395 1393 1396 - for_each_possible_cpu(cpu) { 1397 - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); 1398 - 1399 - if (!policy) 1400 - continue; 1401 - 1402 - __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); 1403 - 1404 - cpufreq_cpu_release(policy); 1405 - } 1394 + for_each_possible_cpu(cpu) 1395 + intel_pstate_update_max_freq(all_cpu_data[cpu]); 1406 1396 1407 1397 mutex_lock(&hybrid_capacity_lock); 1408 1398 ··· 1834 1840 { 1835 1841 struct cpudata *cpudata = 1836 1842 container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); 1837 - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu); 1838 1843 1839 - if (policy) { 1840 - __intel_pstate_update_max_freq(cpudata, policy); 1841 - 1842 - cpufreq_cpu_release(policy); 1843 - 1844 + if (intel_pstate_update_max_freq(cpudata)) { 1844 1845 /* 1845 1846 * The driver will not be unregistered while this function is 1846 1847 * running, so update the capacity without acquiring the driver
+226
drivers/cpufreq/rcpufreq_dt.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Rust based implementation of the cpufreq-dt driver. 4 + 5 + use kernel::{ 6 + c_str, 7 + clk::Clk, 8 + cpu, cpufreq, 9 + cpumask::CpumaskVar, 10 + device::{Core, Device}, 11 + error::code::*, 12 + fmt, 13 + macros::vtable, 14 + module_platform_driver, of, opp, platform, 15 + prelude::*, 16 + str::CString, 17 + sync::Arc, 18 + }; 19 + 20 + /// Finds exact supply name from the OF node. 21 + fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> { 22 + let prop_name = CString::try_from_fmt(fmt!("{}-supply", name)).ok()?; 23 + dev.property_present(&prop_name) 24 + .then(|| CString::try_from_fmt(fmt!("{name}")).ok()) 25 + .flatten() 26 + } 27 + 28 + /// Finds supply name for the CPU from DT. 29 + fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> { 30 + // Try "cpu0" for older DTs, fallback to "cpu". 31 + let name = (cpu == 0) 32 + .then(|| find_supply_name_exact(dev, "cpu0")) 33 + .flatten() 34 + .or_else(|| find_supply_name_exact(dev, "cpu"))?; 35 + 36 + let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?; 37 + list.push(name, GFP_KERNEL).ok()?; 38 + 39 + Some(list) 40 + } 41 + 42 + /// Represents the cpufreq dt device. 
43 + struct CPUFreqDTDevice { 44 + opp_table: opp::Table, 45 + freq_table: opp::FreqTable, 46 + _mask: CpumaskVar, 47 + _token: Option<opp::ConfigToken>, 48 + _clk: Clk, 49 + } 50 + 51 + #[derive(Default)] 52 + struct CPUFreqDTDriver; 53 + 54 + #[vtable] 55 + impl opp::ConfigOps for CPUFreqDTDriver {} 56 + 57 + #[vtable] 58 + impl cpufreq::Driver for CPUFreqDTDriver { 59 + const NAME: &'static CStr = c_str!("cpufreq-dt"); 60 + const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV; 61 + const BOOST_ENABLED: bool = true; 62 + 63 + type PData = Arc<CPUFreqDTDevice>; 64 + 65 + fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> { 66 + let cpu = policy.cpu(); 67 + // SAFETY: The CPU device is only used during init; it won't get hot-unplugged. The cpufreq 68 + // core registers with CPU notifiers and the cpufreq core/driver won't use the CPU device, 69 + // once the CPU is hot-unplugged. 70 + let dev = unsafe { cpu::from_cpu(cpu)? }; 71 + let mut mask = CpumaskVar::new_zero(GFP_KERNEL)?; 72 + 73 + mask.set(cpu); 74 + 75 + let token = find_supply_names(dev, cpu) 76 + .map(|names| { 77 + opp::Config::<Self>::new() 78 + .set_regulator_names(names)? 79 + .set(dev) 80 + }) 81 + .transpose()?; 82 + 83 + // Get OPP-sharing information from "operating-points-v2" bindings. 84 + let fallback = match opp::Table::of_sharing_cpus(dev, &mut mask) { 85 + Ok(()) => false, 86 + Err(e) if e == ENOENT => { 87 + // "operating-points-v2" not supported. If the platform hasn't 88 + // set sharing CPUs, fallback to all CPUs share the `Policy` 89 + // for backward compatibility. 90 + opp::Table::sharing_cpus(dev, &mut mask).is_err() 91 + } 92 + Err(e) => return Err(e), 93 + }; 94 + 95 + // Initialize OPP tables for all policy cpus. 96 + // 97 + // For platforms not using "operating-points-v2" bindings, we do this 98 + // before updating policy cpus. Otherwise, we will end up creating 99 + // duplicate OPPs for the CPUs. 
100 + // 101 + // OPPs might be populated at runtime, don't fail for error here unless 102 + // it is -EPROBE_DEFER. 103 + let mut opp_table = match opp::Table::from_of_cpumask(dev, &mut mask) { 104 + Ok(table) => table, 105 + Err(e) => { 106 + if e == EPROBE_DEFER { 107 + return Err(e); 108 + } 109 + 110 + // The table is added dynamically ? 111 + opp::Table::from_dev(dev)? 112 + } 113 + }; 114 + 115 + // The OPP table must be initialized, statically or dynamically, by this point. 116 + opp_table.opp_count()?; 117 + 118 + // Set sharing cpus for fallback scenario. 119 + if fallback { 120 + mask.setall(); 121 + opp_table.set_sharing_cpus(&mut mask)?; 122 + } 123 + 124 + let mut transition_latency = opp_table.max_transition_latency_ns() as u32; 125 + if transition_latency == 0 { 126 + transition_latency = cpufreq::ETERNAL_LATENCY_NS; 127 + } 128 + 129 + policy 130 + .set_dvfs_possible_from_any_cpu(true) 131 + .set_suspend_freq(opp_table.suspend_freq()) 132 + .set_transition_latency_ns(transition_latency); 133 + 134 + let freq_table = opp_table.cpufreq_table()?; 135 + // SAFETY: The `freq_table` is not dropped while it is getting used by the C code. 136 + unsafe { policy.set_freq_table(&freq_table) }; 137 + 138 + // SAFETY: The returned `clk` is not dropped while it is getting used by the C code. 139 + let clk = unsafe { policy.set_clk(dev, None)? }; 140 + 141 + mask.copy(policy.cpus()); 142 + 143 + Ok(Arc::new( 144 + CPUFreqDTDevice { 145 + opp_table, 146 + freq_table, 147 + _mask: mask, 148 + _token: token, 149 + _clk: clk, 150 + }, 151 + GFP_KERNEL, 152 + )?) 153 + } 154 + 155 + fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result { 156 + Ok(()) 157 + } 158 + 159 + fn online(_policy: &mut cpufreq::Policy) -> Result { 160 + // We did light-weight tear down earlier, nothing to do here. 
161 + Ok(()) 162 + } 163 + 164 + fn offline(_policy: &mut cpufreq::Policy) -> Result { 165 + // Preserve policy->data and don't free resources on light-weight 166 + // tear down. 167 + Ok(()) 168 + } 169 + 170 + fn suspend(policy: &mut cpufreq::Policy) -> Result { 171 + policy.generic_suspend() 172 + } 173 + 174 + fn verify(data: &mut cpufreq::PolicyData) -> Result { 175 + data.generic_verify() 176 + } 177 + 178 + fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result { 179 + let Some(data) = policy.data::<Self::PData>() else { 180 + return Err(ENOENT); 181 + }; 182 + 183 + let freq = data.freq_table.freq(index)?; 184 + data.opp_table.set_rate(freq) 185 + } 186 + 187 + fn get(policy: &mut cpufreq::Policy) -> Result<u32> { 188 + policy.generic_get() 189 + } 190 + 191 + fn set_boost(_policy: &mut cpufreq::Policy, _state: i32) -> Result { 192 + Ok(()) 193 + } 194 + 195 + fn register_em(policy: &mut cpufreq::Policy) { 196 + policy.register_em_opp() 197 + } 198 + } 199 + 200 + kernel::of_device_table!( 201 + OF_TABLE, 202 + MODULE_OF_TABLE, 203 + <CPUFreqDTDriver as platform::Driver>::IdInfo, 204 + [(of::DeviceId::new(c_str!("operating-points-v2")), ())] 205 + ); 206 + 207 + impl platform::Driver for CPUFreqDTDriver { 208 + type IdInfo = (); 209 + const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE); 210 + 211 + fn probe( 212 + pdev: &platform::Device<Core>, 213 + _id_info: Option<&Self::IdInfo>, 214 + ) -> Result<Pin<KBox<Self>>> { 215 + cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?; 216 + Ok(KBox::new(Self {}, GFP_KERNEL)?.into()) 217 + } 218 + } 219 + 220 + module_platform_driver! { 221 + type: CPUFreqDTDriver, 222 + name: "cpufreq-dt", 223 + author: "Viresh Kumar <viresh.kumar@linaro.org>", 224 + description: "Generic CPUFreq DT driver", 225 + license: "GPL v2", 226 + }
+7 -3
include/linux/cpufreq.h
··· 170 170 struct notifier_block nb_max; 171 171 }; 172 172 173 + DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *, 174 + down_write(&_T->rwsem), up_write(&_T->rwsem)) 175 + 176 + DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *, 177 + down_read(&_T->rwsem), up_read(&_T->rwsem)) 178 + 173 179 /* 174 180 * Used for passing new cpufreq policy data to the cpufreq driver's ->verify() 175 181 * callback for sanitization. That callback is only expected to modify the min ··· 241 235 242 236 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); 243 237 244 - struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu); 245 - void cpufreq_cpu_release(struct cpufreq_policy *policy); 246 238 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); 247 239 void refresh_frequency_limits(struct cpufreq_policy *policy); 248 240 void cpufreq_update_policy(unsigned int cpu); ··· 399 395 unsigned int (*get)(unsigned int cpu); 400 396 401 397 /* Called to update policy limits on firmware notifications. */ 402 - void (*update_limits)(unsigned int cpu); 398 + void (*update_limits)(struct cpufreq_policy *policy); 403 399 404 400 /* optional */ 405 401 int (*bios_limit)(int cpu, unsigned int *limit);
+4
rust/bindings/bindings_helper.h
··· 10 10 #include <linux/blk-mq.h> 11 11 #include <linux/blk_types.h> 12 12 #include <linux/blkdev.h> 13 + #include <linux/clk.h> 14 + #include <linux/cpu.h> 15 + #include <linux/cpufreq.h> 13 16 #include <linux/cpumask.h> 14 17 #include <linux/cred.h> 15 18 #include <linux/device/faux.h> ··· 31 28 #include <linux/phy.h> 32 29 #include <linux/pid_namespace.h> 33 30 #include <linux/platform_device.h> 31 + #include <linux/pm_opp.h> 34 32 #include <linux/poll.h> 35 33 #include <linux/property.h> 36 34 #include <linux/refcount.h>
+66
rust/helpers/clk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/clk.h> 4 + 5 + /* 6 + * The "inline" implementation of below helpers are only available when 7 + * CONFIG_HAVE_CLK or CONFIG_HAVE_CLK_PREPARE aren't set. 8 + */ 9 + #ifndef CONFIG_HAVE_CLK 10 + struct clk *rust_helper_clk_get(struct device *dev, const char *id) 11 + { 12 + return clk_get(dev, id); 13 + } 14 + 15 + void rust_helper_clk_put(struct clk *clk) 16 + { 17 + clk_put(clk); 18 + } 19 + 20 + int rust_helper_clk_enable(struct clk *clk) 21 + { 22 + return clk_enable(clk); 23 + } 24 + 25 + void rust_helper_clk_disable(struct clk *clk) 26 + { 27 + clk_disable(clk); 28 + } 29 + 30 + unsigned long rust_helper_clk_get_rate(struct clk *clk) 31 + { 32 + return clk_get_rate(clk); 33 + } 34 + 35 + int rust_helper_clk_set_rate(struct clk *clk, unsigned long rate) 36 + { 37 + return clk_set_rate(clk, rate); 38 + } 39 + #endif 40 + 41 + #ifndef CONFIG_HAVE_CLK_PREPARE 42 + int rust_helper_clk_prepare(struct clk *clk) 43 + { 44 + return clk_prepare(clk); 45 + } 46 + 47 + void rust_helper_clk_unprepare(struct clk *clk) 48 + { 49 + clk_unprepare(clk); 50 + } 51 + #endif 52 + 53 + struct clk *rust_helper_clk_get_optional(struct device *dev, const char *id) 54 + { 55 + return clk_get_optional(dev, id); 56 + } 57 + 58 + int rust_helper_clk_prepare_enable(struct clk *clk) 59 + { 60 + return clk_prepare_enable(clk); 61 + } 62 + 63 + void rust_helper_clk_disable_unprepare(struct clk *clk) 64 + { 65 + clk_disable_unprepare(clk); 66 + }
+10
rust/helpers/cpufreq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/cpufreq.h> 4 + 5 + #ifdef CONFIG_CPU_FREQ 6 + void rust_helper_cpufreq_register_em_with_opp(struct cpufreq_policy *policy) 7 + { 8 + cpufreq_register_em_with_opp(policy); 9 + } 10 + #endif
+25
rust/helpers/cpumask.c
··· 7 7 cpumask_set_cpu(cpu, dstp); 8 8 } 9 9 10 + void rust_helper___cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) 11 + { 12 + __cpumask_set_cpu(cpu, dstp); 13 + } 14 + 10 15 void rust_helper_cpumask_clear_cpu(int cpu, struct cpumask *dstp) 11 16 { 12 17 cpumask_clear_cpu(cpu, dstp); 13 18 } 14 19 20 + void rust_helper___cpumask_clear_cpu(int cpu, struct cpumask *dstp) 21 + { 22 + __cpumask_clear_cpu(cpu, dstp); 23 + } 24 + 25 + bool rust_helper_cpumask_test_cpu(int cpu, struct cpumask *srcp) 26 + { 27 + return cpumask_test_cpu(cpu, srcp); 28 + } 29 + 15 30 void rust_helper_cpumask_setall(struct cpumask *dstp) 16 31 { 17 32 cpumask_setall(dstp); 33 + } 34 + 35 + bool rust_helper_cpumask_empty(struct cpumask *srcp) 36 + { 37 + return cpumask_empty(srcp); 38 + } 39 + 40 + bool rust_helper_cpumask_full(struct cpumask *srcp) 41 + { 42 + return cpumask_full(srcp); 18 43 } 19 44 20 45 unsigned int rust_helper_cpumask_weight(struct cpumask *srcp)
+2
rust/helpers/helpers.c
··· 11 11 #include "bug.c" 12 12 #include "build_assert.c" 13 13 #include "build_bug.c" 14 + #include "clk.c" 15 + #include "cpufreq.c" 14 16 #include "cpumask.c" 15 17 #include "cred.c" 16 18 #include "device.c"
+334
rust/kernel/clk.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Clock abstractions. 4 + //! 5 + //! C header: [`include/linux/clk.h`](srctree/include/linux/clk.h) 6 + //! 7 + //! Reference: <https://docs.kernel.org/driver-api/clk.html> 8 + 9 + use crate::ffi::c_ulong; 10 + 11 + /// The frequency unit. 12 + /// 13 + /// Represents a frequency in hertz, wrapping a [`c_ulong`] value. 14 + /// 15 + /// ## Examples 16 + /// 17 + /// ``` 18 + /// use kernel::clk::Hertz; 19 + /// 20 + /// let hz = 1_000_000_000; 21 + /// let rate = Hertz(hz); 22 + /// 23 + /// assert_eq!(rate.as_hz(), hz); 24 + /// assert_eq!(rate, Hertz(hz)); 25 + /// assert_eq!(rate, Hertz::from_khz(hz / 1_000)); 26 + /// assert_eq!(rate, Hertz::from_mhz(hz / 1_000_000)); 27 + /// assert_eq!(rate, Hertz::from_ghz(hz / 1_000_000_000)); 28 + /// ``` 29 + #[derive(Copy, Clone, PartialEq, Eq, Debug)] 30 + pub struct Hertz(pub c_ulong); 31 + 32 + impl Hertz { 33 + /// Create a new instance from kilohertz (kHz) 34 + pub fn from_khz(khz: c_ulong) -> Self { 35 + Self(khz * 1_000) 36 + } 37 + 38 + /// Create a new instance from megahertz (MHz) 39 + pub fn from_mhz(mhz: c_ulong) -> Self { 40 + Self(mhz * 1_000_000) 41 + } 42 + 43 + /// Create a new instance from gigahertz (GHz) 44 + pub fn from_ghz(ghz: c_ulong) -> Self { 45 + Self(ghz * 1_000_000_000) 46 + } 47 + 48 + /// Get the frequency in hertz 49 + pub fn as_hz(&self) -> c_ulong { 50 + self.0 51 + } 52 + 53 + /// Get the frequency in kilohertz 54 + pub fn as_khz(&self) -> c_ulong { 55 + self.0 / 1_000 56 + } 57 + 58 + /// Get the frequency in megahertz 59 + pub fn as_mhz(&self) -> c_ulong { 60 + self.0 / 1_000_000 61 + } 62 + 63 + /// Get the frequency in gigahertz 64 + pub fn as_ghz(&self) -> c_ulong { 65 + self.0 / 1_000_000_000 66 + } 67 + } 68 + 69 + impl From<Hertz> for c_ulong { 70 + fn from(freq: Hertz) -> Self { 71 + freq.0 72 + } 73 + } 74 + 75 + #[cfg(CONFIG_COMMON_CLK)] 76 + mod common_clk { 77 + use super::Hertz; 78 + use crate::{ 79 + device::Device, 
80 + error::{from_err_ptr, to_result, Result}, 81 + prelude::*, 82 + }; 83 + 84 + use core::{ops::Deref, ptr}; 85 + 86 + /// A reference-counted clock. 87 + /// 88 + /// Rust abstraction for the C [`struct clk`]. 89 + /// 90 + /// # Invariants 91 + /// 92 + /// A [`Clk`] instance holds either a pointer to a valid [`struct clk`] created by the C 93 + /// portion of the kernel or a NULL pointer. 94 + /// 95 + /// Instances of this type are reference-counted. Calling [`Clk::get`] ensures that the 96 + /// allocation remains valid for the lifetime of the [`Clk`]. 97 + /// 98 + /// ## Examples 99 + /// 100 + /// The following example demonstrates how to obtain and configure a clock for a device. 101 + /// 102 + /// ``` 103 + /// use kernel::c_str; 104 + /// use kernel::clk::{Clk, Hertz}; 105 + /// use kernel::device::Device; 106 + /// use kernel::error::Result; 107 + /// 108 + /// fn configure_clk(dev: &Device) -> Result { 109 + /// let clk = Clk::get(dev, Some(c_str!("apb_clk")))?; 110 + /// 111 + /// clk.prepare_enable()?; 112 + /// 113 + /// let expected_rate = Hertz::from_ghz(1); 114 + /// 115 + /// if clk.rate() != expected_rate { 116 + /// clk.set_rate(expected_rate)?; 117 + /// } 118 + /// 119 + /// clk.disable_unprepare(); 120 + /// Ok(()) 121 + /// } 122 + /// ``` 123 + /// 124 + /// [`struct clk`]: https://docs.kernel.org/driver-api/clk.html 125 + #[repr(transparent)] 126 + pub struct Clk(*mut bindings::clk); 127 + 128 + impl Clk { 129 + /// Gets [`Clk`] corresponding to a [`Device`] and a connection id. 130 + /// 131 + /// Equivalent to the kernel's [`clk_get`] API. 132 + /// 133 + /// [`clk_get`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_get 134 + pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> { 135 + let con_id = if let Some(name) = name { 136 + name.as_ptr() 137 + } else { 138 + ptr::null() 139 + }; 140 + 141 + // SAFETY: It is safe to call [`clk_get`] for a valid device pointer. 
142 + // 143 + // INVARIANT: The reference-count is decremented when [`Clk`] goes out of scope. 144 + Ok(Self(from_err_ptr(unsafe { 145 + bindings::clk_get(dev.as_raw(), con_id) 146 + })?)) 147 + } 148 + 149 + /// Obtain the raw [`struct clk`] pointer. 150 + #[inline] 151 + pub fn as_raw(&self) -> *mut bindings::clk { 152 + self.0 153 + } 154 + 155 + /// Enable the clock. 156 + /// 157 + /// Equivalent to the kernel's [`clk_enable`] API. 158 + /// 159 + /// [`clk_enable`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_enable 160 + #[inline] 161 + pub fn enable(&self) -> Result { 162 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 163 + // [`clk_enable`]. 164 + to_result(unsafe { bindings::clk_enable(self.as_raw()) }) 165 + } 166 + 167 + /// Disable the clock. 168 + /// 169 + /// Equivalent to the kernel's [`clk_disable`] API. 170 + /// 171 + /// [`clk_disable`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_disable 172 + #[inline] 173 + pub fn disable(&self) { 174 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 175 + // [`clk_disable`]. 176 + unsafe { bindings::clk_disable(self.as_raw()) }; 177 + } 178 + 179 + /// Prepare the clock. 180 + /// 181 + /// Equivalent to the kernel's [`clk_prepare`] API. 182 + /// 183 + /// [`clk_prepare`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_prepare 184 + #[inline] 185 + pub fn prepare(&self) -> Result { 186 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 187 + // [`clk_prepare`]. 188 + to_result(unsafe { bindings::clk_prepare(self.as_raw()) }) 189 + } 190 + 191 + /// Unprepare the clock. 192 + /// 193 + /// Equivalent to the kernel's [`clk_unprepare`] API. 194 + /// 195 + /// [`clk_unprepare`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_unprepare 196 + #[inline] 197 + pub fn unprepare(&self) { 198 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 199 + // [`clk_unprepare`]. 
200 + unsafe { bindings::clk_unprepare(self.as_raw()) }; 201 + } 202 + 203 + /// Prepare and enable the clock. 204 + /// 205 + /// Equivalent to calling [`Clk::prepare`] followed by [`Clk::enable`]. 206 + #[inline] 207 + pub fn prepare_enable(&self) -> Result { 208 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 209 + // [`clk_prepare_enable`]. 210 + to_result(unsafe { bindings::clk_prepare_enable(self.as_raw()) }) 211 + } 212 + 213 + /// Disable and unprepare the clock. 214 + /// 215 + /// Equivalent to calling [`Clk::disable`] followed by [`Clk::unprepare`]. 216 + #[inline] 217 + pub fn disable_unprepare(&self) { 218 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 219 + // [`clk_disable_unprepare`]. 220 + unsafe { bindings::clk_disable_unprepare(self.as_raw()) }; 221 + } 222 + 223 + /// Get clock's rate. 224 + /// 225 + /// Equivalent to the kernel's [`clk_get_rate`] API. 226 + /// 227 + /// [`clk_get_rate`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_get_rate 228 + #[inline] 229 + pub fn rate(&self) -> Hertz { 230 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 231 + // [`clk_get_rate`]. 232 + Hertz(unsafe { bindings::clk_get_rate(self.as_raw()) }) 233 + } 234 + 235 + /// Set clock's rate. 236 + /// 237 + /// Equivalent to the kernel's [`clk_set_rate`] API. 238 + /// 239 + /// [`clk_set_rate`]: https://docs.kernel.org/core-api/kernel-api.html#c.clk_set_rate 240 + #[inline] 241 + pub fn set_rate(&self, rate: Hertz) -> Result { 242 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for 243 + // [`clk_set_rate`]. 244 + to_result(unsafe { bindings::clk_set_rate(self.as_raw(), rate.as_hz()) }) 245 + } 246 + } 247 + 248 + impl Drop for Clk { 249 + fn drop(&mut self) { 250 + // SAFETY: By the type invariants, self.as_raw() is a valid argument for [`clk_put`]. 
251 + unsafe { bindings::clk_put(self.as_raw()) }; 252 + } 253 + } 254 + 255 + /// A reference-counted optional clock. 256 + /// 257 + /// A lightweight wrapper around an optional [`Clk`]. An [`OptionalClk`] represents a [`Clk`] 258 + /// that a driver can function without but may improve performance or enable additional 259 + /// features when available. 260 + /// 261 + /// # Invariants 262 + /// 263 + /// An [`OptionalClk`] instance encapsulates a [`Clk`] with either a valid [`struct clk`] or 264 + /// `NULL` pointer. 265 + /// 266 + /// Instances of this type are reference-counted. Calling [`OptionalClk::get`] ensures that the 267 + /// allocation remains valid for the lifetime of the [`OptionalClk`]. 268 + /// 269 + /// ## Examples 270 + /// 271 + /// The following example demonstrates how to obtain and configure an optional clock for a 272 + /// device. The code functions correctly whether or not the clock is available. 273 + /// 274 + /// ``` 275 + /// use kernel::c_str; 276 + /// use kernel::clk::{OptionalClk, Hertz}; 277 + /// use kernel::device::Device; 278 + /// use kernel::error::Result; 279 + /// 280 + /// fn configure_clk(dev: &Device) -> Result { 281 + /// let clk = OptionalClk::get(dev, Some(c_str!("apb_clk")))?; 282 + /// 283 + /// clk.prepare_enable()?; 284 + /// 285 + /// let expected_rate = Hertz::from_ghz(1); 286 + /// 287 + /// if clk.rate() != expected_rate { 288 + /// clk.set_rate(expected_rate)?; 289 + /// } 290 + /// 291 + /// clk.disable_unprepare(); 292 + /// Ok(()) 293 + /// } 294 + /// ``` 295 + /// 296 + /// [`struct clk`]: https://docs.kernel.org/driver-api/clk.html 297 + pub struct OptionalClk(Clk); 298 + 299 + impl OptionalClk { 300 + /// Gets [`OptionalClk`] corresponding to a [`Device`] and a connection id. 301 + /// 302 + /// Equivalent to the kernel's [`clk_get_optional`] API. 
303 + /// 304 + /// [`clk_get_optional`]: 305 + /// https://docs.kernel.org/core-api/kernel-api.html#c.clk_get_optional 306 + pub fn get(dev: &Device, name: Option<&CStr>) -> Result<Self> { 307 + let con_id = if let Some(name) = name { 308 + name.as_ptr() 309 + } else { 310 + ptr::null() 311 + }; 312 + 313 + // SAFETY: It is safe to call [`clk_get_optional`] for a valid device pointer. 314 + // 315 + // INVARIANT: The reference-count is decremented when [`OptionalClk`] goes out of 316 + // scope. 317 + Ok(Self(Clk(from_err_ptr(unsafe { 318 + bindings::clk_get_optional(dev.as_raw(), con_id) 319 + })?))) 320 + } 321 + } 322 + 323 + // Make [`OptionalClk`] behave like [`Clk`]. 324 + impl Deref for OptionalClk { 325 + type Target = Clk; 326 + 327 + fn deref(&self) -> &Clk { 328 + &self.0 329 + } 330 + } 331 + } 332 + 333 + #[cfg(CONFIG_COMMON_CLK)] 334 + pub use common_clk::*;
+30
rust/kernel/cpu.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 
2 + 
3 + //! Generic CPU definitions. 
4 + //! 
5 + //! C header: [`include/linux/cpu.h`](srctree/include/linux/cpu.h) 
6 + 
7 + use crate::{bindings, device::Device, error::Result, prelude::ENODEV}; 
8 + 
9 + /// Returns a reference to the [`Device`] of the given CPU. 
10 + /// 
11 + /// # Safety 
12 + /// 
13 + /// Reference counting is not implemented for the CPU device in the C code. When a CPU is 
14 + /// hot-unplugged, the corresponding CPU device is unregistered, but its associated memory 
15 + /// is not freed. 
16 + /// 
17 + /// Callers must ensure that the CPU device is not used after it has been unregistered. 
18 + /// This can be achieved, for example, by registering a CPU hotplug notifier and removing 
19 + /// any references to the CPU device within the notifier's callback. 
20 + pub unsafe fn from_cpu(cpu: u32) -> Result<&'static Device> { 
21 +     // SAFETY: It is safe to call `get_cpu_device()` for any CPU. 
22 +     let ptr = unsafe { bindings::get_cpu_device(cpu) }; 
23 +     if ptr.is_null() { 
24 +         return Err(ENODEV); 
25 +     } 
26 + 
27 +     // SAFETY: The pointer returned by `get_cpu_device()`, if not `NULL`, is a valid pointer to 
28 +     // a `struct device` and is never freed by the C code. 
29 +     Ok(unsafe { Device::as_ref(ptr) }) 
30 + }
+1321
rust/kernel/cpufreq.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! CPU frequency scaling. 4 + //! 5 + //! This module provides rust abstractions for interacting with the cpufreq subsystem. 6 + //! 7 + //! C header: [`include/linux/cpufreq.h`](srctree/include/linux/cpufreq.h) 8 + //! 9 + //! Reference: <https://docs.kernel.org/admin-guide/pm/cpufreq.html> 10 + 11 + use crate::{ 12 + clk::Hertz, 13 + cpumask, 14 + device::{Bound, Device}, 15 + devres::Devres, 16 + error::{code::*, from_err_ptr, from_result, to_result, Result, VTABLE_DEFAULT_ERROR}, 17 + ffi::{c_char, c_ulong}, 18 + prelude::*, 19 + types::ForeignOwnable, 20 + types::Opaque, 21 + }; 22 + 23 + #[cfg(CONFIG_COMMON_CLK)] 24 + use crate::clk::Clk; 25 + 26 + use core::{ 27 + cell::UnsafeCell, 28 + marker::PhantomData, 29 + mem::MaybeUninit, 30 + ops::{Deref, DerefMut}, 31 + pin::Pin, 32 + ptr, 33 + }; 34 + 35 + use macros::vtable; 36 + 37 + /// Maximum length of CPU frequency driver's name. 38 + const CPUFREQ_NAME_LEN: usize = bindings::CPUFREQ_NAME_LEN as usize; 39 + 40 + /// Default transition latency value in nanoseconds. 41 + pub const ETERNAL_LATENCY_NS: u32 = bindings::CPUFREQ_ETERNAL as u32; 42 + 43 + /// CPU frequency driver flags. 44 + pub mod flags { 45 + /// Driver needs to update internal limits even if frequency remains unchanged. 46 + pub const NEED_UPDATE_LIMITS: u16 = 1 << 0; 47 + 48 + /// Platform where constants like `loops_per_jiffy` are unaffected by frequency changes. 49 + pub const CONST_LOOPS: u16 = 1 << 1; 50 + 51 + /// Register driver as a thermal cooling device automatically. 52 + pub const IS_COOLING_DEV: u16 = 1 << 2; 53 + 54 + /// Supports multiple clock domains with per-policy governors in `cpu/cpuN/cpufreq/`. 55 + pub const HAVE_GOVERNOR_PER_POLICY: u16 = 1 << 3; 56 + 57 + /// Allows post-change notifications outside of the `target()` routine. 58 + pub const ASYNC_NOTIFICATION: u16 = 1 << 4; 59 + 60 + /// Ensure CPU starts at a valid frequency from the driver's freq-table. 
61 + pub const NEED_INITIAL_FREQ_CHECK: u16 = 1 << 5; 62 + 63 + /// Disallow governors with `dynamic_switching` capability. 64 + pub const NO_AUTO_DYNAMIC_SWITCHING: u16 = 1 << 6; 65 + } 66 + 67 + /// Relations from the C code. 68 + const CPUFREQ_RELATION_L: u32 = 0; 69 + const CPUFREQ_RELATION_H: u32 = 1; 70 + const CPUFREQ_RELATION_C: u32 = 2; 71 + 72 + /// Can be used with any of the above values. 73 + const CPUFREQ_RELATION_E: u32 = 1 << 2; 74 + 75 + /// CPU frequency selection relations. 76 + /// 77 + /// CPU frequency selection relations, each optionally marked as "efficient". 78 + #[derive(Copy, Clone, Debug, Eq, PartialEq)] 79 + pub enum Relation { 80 + /// Select the lowest frequency at or above target. 81 + Low(bool), 82 + /// Select the highest frequency below or at target. 83 + High(bool), 84 + /// Select the closest frequency to the target. 85 + Close(bool), 86 + } 87 + 88 + impl Relation { 89 + // Construct from a C-compatible `u32` value. 90 + fn new(val: u32) -> Result<Self> { 91 + let efficient = val & CPUFREQ_RELATION_E != 0; 92 + 93 + Ok(match val & !CPUFREQ_RELATION_E { 94 + CPUFREQ_RELATION_L => Self::Low(efficient), 95 + CPUFREQ_RELATION_H => Self::High(efficient), 96 + CPUFREQ_RELATION_C => Self::Close(efficient), 97 + _ => return Err(EINVAL), 98 + }) 99 + } 100 + } 101 + 102 + impl From<Relation> for u32 { 103 + // Convert to a C-compatible `u32` value. 104 + fn from(rel: Relation) -> Self { 105 + let (mut val, efficient) = match rel { 106 + Relation::Low(e) => (CPUFREQ_RELATION_L, e), 107 + Relation::High(e) => (CPUFREQ_RELATION_H, e), 108 + Relation::Close(e) => (CPUFREQ_RELATION_C, e), 109 + }; 110 + 111 + if efficient { 112 + val |= CPUFREQ_RELATION_E; 113 + } 114 + 115 + val 116 + } 117 + } 118 + 119 + /// Policy data. 120 + /// 121 + /// Rust abstraction for the C `struct cpufreq_policy_data`. 122 + /// 123 + /// # Invariants 124 + /// 125 + /// A [`PolicyData`] instance always corresponds to a valid C `struct cpufreq_policy_data`. 
126 + /// 
127 + /// The callers must ensure that the `struct cpufreq_policy_data` is valid for access and remains 
128 + /// valid for the lifetime of the returned reference. 
129 + #[repr(transparent)] 
130 + pub struct PolicyData(Opaque<bindings::cpufreq_policy_data>); 
131 + 
132 + impl PolicyData { 
133 +     /// Creates a mutable reference to an existing `struct cpufreq_policy_data` pointer. 
134 +     /// 
135 +     /// # Safety 
136 +     /// 
137 +     /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime 
138 +     /// of the returned reference. 
139 +     #[inline] 
140 +     pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpufreq_policy_data) -> &'a mut Self { 
141 +         // SAFETY: Guaranteed by the safety requirements of the function. 
142 +         // 
143 +         // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the 
144 +         // lifetime of the returned reference. 
145 +         unsafe { &mut *ptr.cast() } 
146 +     } 
147 + 
148 +     /// Returns a raw pointer to the underlying C `cpufreq_policy_data`. 
149 +     #[inline] 
150 +     pub fn as_raw(&self) -> *mut bindings::cpufreq_policy_data { 
151 +         let this: *const Self = self; 
152 +         this.cast_mut().cast() 
153 +     } 
154 + 
155 +     /// Wrapper for `cpufreq_generic_frequency_table_verify`. 
156 +     #[inline] 
157 +     pub fn generic_verify(&self) -> Result { 
158 +         // SAFETY: By the type invariant, the pointer stored in `self` is valid. 
159 +         to_result(unsafe { bindings::cpufreq_generic_frequency_table_verify(self.as_raw()) }) 
160 +     } 
161 + } 
162 + 
163 + /// The frequency table index. 
164 + /// 
165 + /// Represents an index within a frequency table. 
166 + /// 
167 + /// # Invariants 
168 + /// 
169 + /// The index must correspond to a valid entry in the [`Table`] it is used for. 
170 + #[derive(Copy, Clone, PartialEq, Eq, Debug)] 
171 + pub struct TableIndex(usize); 
172 + 
173 + impl TableIndex { 
174 +     /// Creates an instance of [`TableIndex`]. 
175 +     /// 
176 +     /// # Safety 
177 +     /// 
178 +     /// The caller must ensure that `index` corresponds to a valid entry in the [`Table`] it is used 
179 +     /// for. 
180 +     pub unsafe fn new(index: usize) -> Self { 
181 +         // INVARIANT: The caller ensures that `index` corresponds to a valid entry in the [`Table`]. 
182 +         Self(index) 
183 +     } 
184 + } 
185 + 
186 + impl From<TableIndex> for usize { 
187 +     #[inline] 
188 +     fn from(index: TableIndex) -> Self { 
189 +         index.0 
190 +     } 
191 + } 
192 + 
193 + /// CPU frequency table. 
194 + /// 
195 + /// Rust abstraction for the C `struct cpufreq_frequency_table`. 
196 + /// 
197 + /// # Invariants 
198 + /// 
199 + /// A [`Table`] instance always corresponds to a valid C `struct cpufreq_frequency_table`. 
200 + /// 
201 + /// The callers must ensure that the `struct cpufreq_frequency_table` is valid for access and 
202 + /// remains valid for the lifetime of the returned reference. 
203 + /// 
204 + /// ## Examples 
205 + /// 
206 + /// The following example demonstrates how to read a frequency value from [`Table`]. 
207 + /// 
208 + /// ``` 
209 + /// use kernel::cpufreq::{Policy, TableIndex}; 
210 + /// 
211 + /// fn show_freq(policy: &Policy) -> Result { 
212 + ///     let table = policy.freq_table()?; 
213 + /// 
214 + ///     // SAFETY: Index is a valid entry in the table. 
215 + ///     let index = unsafe { TableIndex::new(0) }; 
216 + /// 
217 + ///     pr_info!("The frequency at index 0 is: {:?}\n", table.freq(index)?); 
218 + ///     pr_info!("The flags at index 0 is: {}\n", table.flags(index)); 
219 + ///     pr_info!("The data at index 0 is: {}\n", table.data(index)); 
220 + ///     Ok(()) 
221 + /// } 
222 + /// ``` 
223 + #[repr(transparent)] 
224 + pub struct Table(Opaque<bindings::cpufreq_frequency_table>); 
225 + 
226 + impl Table { 
227 +     /// Creates a reference to an existing C `struct cpufreq_frequency_table` pointer. 
228 + /// 229 + /// # Safety 230 + /// 231 + /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime 232 + /// of the returned reference. 233 + #[inline] 234 + pub unsafe fn from_raw<'a>(ptr: *const bindings::cpufreq_frequency_table) -> &'a Self { 235 + // SAFETY: Guaranteed by the safety requirements of the function. 236 + // 237 + // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the 238 + // lifetime of the returned reference. 239 + unsafe { &*ptr.cast() } 240 + } 241 + 242 + /// Returns the raw mutable pointer to the C `struct cpufreq_frequency_table`. 243 + #[inline] 244 + pub fn as_raw(&self) -> *mut bindings::cpufreq_frequency_table { 245 + let this: *const Self = self; 246 + this.cast_mut().cast() 247 + } 248 + 249 + /// Returns frequency at `index` in the [`Table`]. 250 + #[inline] 251 + pub fn freq(&self, index: TableIndex) -> Result<Hertz> { 252 + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is 253 + // guaranteed to be valid by its safety requirements. 254 + Ok(Hertz::from_khz(unsafe { 255 + (*self.as_raw().add(index.into())).frequency.try_into()? 256 + })) 257 + } 258 + 259 + /// Returns flags at `index` in the [`Table`]. 260 + #[inline] 261 + pub fn flags(&self, index: TableIndex) -> u32 { 262 + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is 263 + // guaranteed to be valid by its safety requirements. 264 + unsafe { (*self.as_raw().add(index.into())).flags } 265 + } 266 + 267 + /// Returns data at `index` in the [`Table`]. 268 + #[inline] 269 + pub fn data(&self, index: TableIndex) -> u32 { 270 + // SAFETY: By the type invariant, the pointer stored in `self` is valid and `index` is 271 + // guaranteed to be valid by its safety requirements. 
272 + unsafe { (*self.as_raw().add(index.into())).driver_data } 273 + } 274 + } 275 + 276 + /// CPU frequency table owned and pinned in memory, created from a [`TableBuilder`]. 277 + pub struct TableBox { 278 + entries: Pin<KVec<bindings::cpufreq_frequency_table>>, 279 + } 280 + 281 + impl TableBox { 282 + /// Constructs a new [`TableBox`] from a [`KVec`] of entries. 283 + /// 284 + /// # Errors 285 + /// 286 + /// Returns `EINVAL` if the entries list is empty. 287 + #[inline] 288 + fn new(entries: KVec<bindings::cpufreq_frequency_table>) -> Result<Self> { 289 + if entries.is_empty() { 290 + return Err(EINVAL); 291 + } 292 + 293 + Ok(Self { 294 + // Pin the entries to memory, since we are passing its pointer to the C code. 295 + entries: Pin::new(entries), 296 + }) 297 + } 298 + 299 + /// Returns a raw pointer to the underlying C `cpufreq_frequency_table`. 300 + #[inline] 301 + fn as_raw(&self) -> *const bindings::cpufreq_frequency_table { 302 + // The pointer is valid until the table gets dropped. 303 + self.entries.as_ptr() 304 + } 305 + } 306 + 307 + impl Deref for TableBox { 308 + type Target = Table; 309 + 310 + fn deref(&self) -> &Self::Target { 311 + // SAFETY: The caller owns TableBox, it is safe to deref. 312 + unsafe { Self::Target::from_raw(self.as_raw()) } 313 + } 314 + } 315 + 316 + /// CPU frequency table builder. 317 + /// 318 + /// This is used by the CPU frequency drivers to build a frequency table dynamically. 319 + /// 320 + /// ## Examples 321 + /// 322 + /// The following example demonstrates how to create a CPU frequency table. 323 + /// 324 + /// ``` 325 + /// use kernel::cpufreq::{TableBuilder, TableIndex}; 326 + /// use kernel::clk::Hertz; 327 + /// 328 + /// let mut builder = TableBuilder::new(); 329 + /// 330 + /// // Adds few entries to the table. 
331 + /// builder.add(Hertz::from_mhz(700), 0, 1).unwrap(); 332 + /// builder.add(Hertz::from_mhz(800), 2, 3).unwrap(); 333 + /// builder.add(Hertz::from_mhz(900), 4, 5).unwrap(); 334 + /// builder.add(Hertz::from_ghz(1), 6, 7).unwrap(); 335 + /// 336 + /// let table = builder.to_table().unwrap(); 337 + /// 338 + /// // SAFETY: Index values correspond to valid entries in the table. 339 + /// let (index0, index2) = unsafe { (TableIndex::new(0), TableIndex::new(2)) }; 340 + /// 341 + /// assert_eq!(table.freq(index0), Ok(Hertz::from_mhz(700))); 342 + /// assert_eq!(table.flags(index0), 0); 343 + /// assert_eq!(table.data(index0), 1); 344 + /// 345 + /// assert_eq!(table.freq(index2), Ok(Hertz::from_mhz(900))); 346 + /// assert_eq!(table.flags(index2), 4); 347 + /// assert_eq!(table.data(index2), 5); 348 + /// ``` 349 + #[derive(Default)] 350 + #[repr(transparent)] 351 + pub struct TableBuilder { 352 + entries: KVec<bindings::cpufreq_frequency_table>, 353 + } 354 + 355 + impl TableBuilder { 356 + /// Creates a new instance of [`TableBuilder`]. 357 + #[inline] 358 + pub fn new() -> Self { 359 + Self { 360 + entries: KVec::new(), 361 + } 362 + } 363 + 364 + /// Adds a new entry to the table. 365 + pub fn add(&mut self, freq: Hertz, flags: u32, driver_data: u32) -> Result { 366 + // Adds the new entry at the end of the vector. 367 + Ok(self.entries.push( 368 + bindings::cpufreq_frequency_table { 369 + flags, 370 + driver_data, 371 + frequency: freq.as_khz() as u32, 372 + }, 373 + GFP_KERNEL, 374 + )?) 375 + } 376 + 377 + /// Consumes the [`TableBuilder`] and returns [`TableBox`]. 378 + pub fn to_table(mut self) -> Result<TableBox> { 379 + // Add last entry to the table. 380 + self.add(Hertz(c_ulong::MAX), 0, 0)?; 381 + 382 + TableBox::new(self.entries) 383 + } 384 + } 385 + 386 + /// CPU frequency policy. 387 + /// 388 + /// Rust abstraction for the C `struct cpufreq_policy`. 
389 + /// 
390 + /// # Invariants 
391 + /// 
392 + /// A [`Policy`] instance always corresponds to a valid C `struct cpufreq_policy`. 
393 + /// 
394 + /// The callers must ensure that the `struct cpufreq_policy` is valid for access and remains valid 
395 + /// for the lifetime of the returned reference. 
396 + /// 
397 + /// ## Examples 
398 + /// 
399 + /// The following example demonstrates how to update a CPU frequency [`Policy`]. 
400 + /// 
401 + /// ``` 
402 + /// use kernel::cpufreq::{ETERNAL_LATENCY_NS, Policy}; 
403 + /// 
404 + /// fn update_policy(policy: &mut Policy) { 
405 + ///     policy 
406 + ///         .set_dvfs_possible_from_any_cpu(true) 
407 + ///         .set_fast_switch_possible(true) 
408 + ///         .set_transition_latency_ns(ETERNAL_LATENCY_NS); 
409 + /// 
410 + ///     pr_info!("The policy details are: {:?}\n", (policy.cpu(), policy.cur())); 
411 + /// } 
412 + /// ``` 
413 + #[repr(transparent)] 
414 + pub struct Policy(Opaque<bindings::cpufreq_policy>); 
415 + 
416 + impl Policy { 
417 +     /// Creates a reference to an existing `struct cpufreq_policy` pointer. 
418 +     /// 
419 +     /// # Safety 
420 +     /// 
421 +     /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime 
422 +     /// of the returned reference. 
423 +     #[inline] 
424 +     pub unsafe fn from_raw<'a>(ptr: *const bindings::cpufreq_policy) -> &'a Self { 
425 +         // SAFETY: Guaranteed by the safety requirements of the function. 
426 +         // 
427 +         // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the 
428 +         // lifetime of the returned reference. 
429 +         unsafe { &*ptr.cast() } 
430 +     } 
431 + 
432 +     /// Creates a mutable reference to an existing `struct cpufreq_policy` pointer. 
433 +     /// 
434 +     /// # Safety 
435 +     /// 
436 +     /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime 
437 +     /// of the returned reference. 
438 + #[inline] 439 + pub unsafe fn from_raw_mut<'a>(ptr: *mut bindings::cpufreq_policy) -> &'a mut Self { 440 + // SAFETY: Guaranteed by the safety requirements of the function. 441 + // 442 + // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the 443 + // lifetime of the returned reference. 444 + unsafe { &mut *ptr.cast() } 445 + } 446 + 447 + /// Returns a raw mutable pointer to the C `struct cpufreq_policy`. 448 + #[inline] 449 + fn as_raw(&self) -> *mut bindings::cpufreq_policy { 450 + let this: *const Self = self; 451 + this.cast_mut().cast() 452 + } 453 + 454 + #[inline] 455 + fn as_ref(&self) -> &bindings::cpufreq_policy { 456 + // SAFETY: By the type invariant, the pointer stored in `self` is valid. 457 + unsafe { &*self.as_raw() } 458 + } 459 + 460 + #[inline] 461 + fn as_mut_ref(&mut self) -> &mut bindings::cpufreq_policy { 462 + // SAFETY: By the type invariant, the pointer stored in `self` is valid. 463 + unsafe { &mut *self.as_raw() } 464 + } 465 + 466 + /// Returns the primary CPU for the [`Policy`]. 467 + #[inline] 468 + pub fn cpu(&self) -> u32 { 469 + self.as_ref().cpu 470 + } 471 + 472 + /// Returns the minimum frequency for the [`Policy`]. 473 + #[inline] 474 + pub fn min(&self) -> Hertz { 475 + Hertz::from_khz(self.as_ref().min as usize) 476 + } 477 + 478 + /// Set the minimum frequency for the [`Policy`]. 479 + #[inline] 480 + pub fn set_min(&mut self, min: Hertz) -> &mut Self { 481 + self.as_mut_ref().min = min.as_khz() as u32; 482 + self 483 + } 484 + 485 + /// Returns the maximum frequency for the [`Policy`]. 486 + #[inline] 487 + pub fn max(&self) -> Hertz { 488 + Hertz::from_khz(self.as_ref().max as usize) 489 + } 490 + 491 + /// Set the maximum frequency for the [`Policy`]. 492 + #[inline] 493 + pub fn set_max(&mut self, max: Hertz) -> &mut Self { 494 + self.as_mut_ref().max = max.as_khz() as u32; 495 + self 496 + } 497 + 498 + /// Returns the current frequency for the [`Policy`]. 
499 + #[inline] 500 + pub fn cur(&self) -> Hertz { 501 + Hertz::from_khz(self.as_ref().cur as usize) 502 + } 503 + 504 + /// Returns the suspend frequency for the [`Policy`]. 505 + #[inline] 506 + pub fn suspend_freq(&self) -> Hertz { 507 + Hertz::from_khz(self.as_ref().suspend_freq as usize) 508 + } 509 + 510 + /// Sets the suspend frequency for the [`Policy`]. 511 + #[inline] 512 + pub fn set_suspend_freq(&mut self, freq: Hertz) -> &mut Self { 513 + self.as_mut_ref().suspend_freq = freq.as_khz() as u32; 514 + self 515 + } 516 + 517 + /// Provides a wrapper to the generic suspend routine. 518 + #[inline] 519 + pub fn generic_suspend(&mut self) -> Result { 520 + // SAFETY: By the type invariant, the pointer stored in `self` is valid. 521 + to_result(unsafe { bindings::cpufreq_generic_suspend(self.as_mut_ref()) }) 522 + } 523 + 524 + /// Provides a wrapper to the generic get routine. 525 + #[inline] 526 + pub fn generic_get(&self) -> Result<u32> { 527 + // SAFETY: By the type invariant, the pointer stored in `self` is valid. 528 + Ok(unsafe { bindings::cpufreq_generic_get(self.cpu()) }) 529 + } 530 + 531 + /// Provides a wrapper to the register with energy model using the OPP core. 532 + #[cfg(CONFIG_PM_OPP)] 533 + #[inline] 534 + pub fn register_em_opp(&mut self) { 535 + // SAFETY: By the type invariant, the pointer stored in `self` is valid. 536 + unsafe { bindings::cpufreq_register_em_with_opp(self.as_mut_ref()) }; 537 + } 538 + 539 + /// Gets [`cpumask::Cpumask`] for a cpufreq [`Policy`]. 540 + #[inline] 541 + pub fn cpus(&mut self) -> &mut cpumask::Cpumask { 542 + // SAFETY: The pointer to `cpus` is valid for writing and remains valid for the lifetime of 543 + // the returned reference. 544 + unsafe { cpumask::CpumaskVar::as_mut_ref(&mut self.as_mut_ref().cpus) } 545 + } 546 + 547 + /// Sets clock for the [`Policy`]. 
548 + /// 549 + /// # Safety 550 + /// 551 + /// The caller must guarantee that the returned [`Clk`] is not dropped while it is getting used 552 + /// by the C code. 553 + #[cfg(CONFIG_COMMON_CLK)] 554 + pub unsafe fn set_clk(&mut self, dev: &Device, name: Option<&CStr>) -> Result<Clk> { 555 + let clk = Clk::get(dev, name)?; 556 + self.as_mut_ref().clk = clk.as_raw(); 557 + Ok(clk) 558 + } 559 + 560 + /// Allows / disallows frequency switching code to run on any CPU. 561 + #[inline] 562 + pub fn set_dvfs_possible_from_any_cpu(&mut self, val: bool) -> &mut Self { 563 + self.as_mut_ref().dvfs_possible_from_any_cpu = val; 564 + self 565 + } 566 + 567 + /// Returns if fast switching of frequencies is possible or not. 568 + #[inline] 569 + pub fn fast_switch_possible(&self) -> bool { 570 + self.as_ref().fast_switch_possible 571 + } 572 + 573 + /// Enables / disables fast frequency switching. 574 + #[inline] 575 + pub fn set_fast_switch_possible(&mut self, val: bool) -> &mut Self { 576 + self.as_mut_ref().fast_switch_possible = val; 577 + self 578 + } 579 + 580 + /// Sets transition latency (in nanoseconds) for the [`Policy`]. 581 + #[inline] 582 + pub fn set_transition_latency_ns(&mut self, latency_ns: u32) -> &mut Self { 583 + self.as_mut_ref().cpuinfo.transition_latency = latency_ns; 584 + self 585 + } 586 + 587 + /// Sets cpuinfo `min_freq`. 588 + #[inline] 589 + pub fn set_cpuinfo_min_freq(&mut self, min_freq: Hertz) -> &mut Self { 590 + self.as_mut_ref().cpuinfo.min_freq = min_freq.as_khz() as u32; 591 + self 592 + } 593 + 594 + /// Sets cpuinfo `max_freq`. 595 + #[inline] 596 + pub fn set_cpuinfo_max_freq(&mut self, max_freq: Hertz) -> &mut Self { 597 + self.as_mut_ref().cpuinfo.max_freq = max_freq.as_khz() as u32; 598 + self 599 + } 600 + 601 + /// Set `transition_delay_us`, i.e. the minimum time between successive frequency change 602 + /// requests. 
603 + #[inline] 604 + pub fn set_transition_delay_us(&mut self, transition_delay_us: u32) -> &mut Self { 605 + self.as_mut_ref().transition_delay_us = transition_delay_us; 606 + self 607 + } 608 + 609 + /// Returns reference to the CPU frequency [`Table`] for the [`Policy`]. 610 + pub fn freq_table(&self) -> Result<&Table> { 611 + if self.as_ref().freq_table.is_null() { 612 + return Err(EINVAL); 613 + } 614 + 615 + // SAFETY: The `freq_table` is guaranteed to be valid for reading and remains valid for the 616 + // lifetime of the returned reference. 617 + Ok(unsafe { Table::from_raw(self.as_ref().freq_table) }) 618 + } 619 + 620 + /// Sets the CPU frequency [`Table`] for the [`Policy`]. 621 + /// 622 + /// # Safety 623 + /// 624 + /// The caller must guarantee that the [`Table`] is not dropped while it is getting used by the 625 + /// C code. 626 + #[inline] 627 + pub unsafe fn set_freq_table(&mut self, table: &Table) -> &mut Self { 628 + self.as_mut_ref().freq_table = table.as_raw(); 629 + self 630 + } 631 + 632 + /// Returns the [`Policy`]'s private data. 633 + pub fn data<T: ForeignOwnable>(&mut self) -> Option<<T>::Borrowed<'_>> { 634 + if self.as_ref().driver_data.is_null() { 635 + None 636 + } else { 637 + // SAFETY: The data is earlier set from [`set_data`]. 638 + Some(unsafe { T::borrow(self.as_ref().driver_data) }) 639 + } 640 + } 641 + 642 + /// Sets the private data of the [`Policy`] using a foreign-ownable wrapper. 643 + /// 644 + /// # Errors 645 + /// 646 + /// Returns `EBUSY` if private data is already set. 647 + fn set_data<T: ForeignOwnable>(&mut self, data: T) -> Result { 648 + if self.as_ref().driver_data.is_null() { 649 + // Transfer the ownership of the data to the foreign interface. 650 + self.as_mut_ref().driver_data = <T as ForeignOwnable>::into_foreign(data) as _; 651 + Ok(()) 652 + } else { 653 + Err(EBUSY) 654 + } 655 + } 656 + 657 + /// Clears and returns ownership of the private data. 
658 + fn clear_data<T: ForeignOwnable>(&mut self) -> Option<T> { 659 + if self.as_ref().driver_data.is_null() { 660 + None 661 + } else { 662 + let data = Some( 663 + // SAFETY: The data is earlier set by us from [`set_data`]. It is safe to take 664 + // back the ownership of the data from the foreign interface. 665 + unsafe { <T as ForeignOwnable>::from_foreign(self.as_ref().driver_data) }, 666 + ); 667 + self.as_mut_ref().driver_data = ptr::null_mut(); 668 + data 669 + } 670 + } 671 + } 672 + 673 + /// CPU frequency policy created from a CPU number. 674 + /// 675 + /// This struct represents the CPU frequency policy obtained for a specific CPU, providing safe 676 + /// access to the underlying `cpufreq_policy` and ensuring proper cleanup when the `PolicyCpu` is 677 + /// dropped. 678 + struct PolicyCpu<'a>(&'a mut Policy); 679 + 680 + impl<'a> PolicyCpu<'a> { 681 + fn from_cpu(cpu: u32) -> Result<Self> { 682 + // SAFETY: It is safe to call `cpufreq_cpu_get` for any valid CPU. 683 + let ptr = from_err_ptr(unsafe { bindings::cpufreq_cpu_get(cpu) })?; 684 + 685 + Ok(Self( 686 + // SAFETY: The `ptr` is guaranteed to be valid and remains valid for the lifetime of 687 + // the returned reference. 688 + unsafe { Policy::from_raw_mut(ptr) }, 689 + )) 690 + } 691 + } 692 + 693 + impl<'a> Deref for PolicyCpu<'a> { 694 + type Target = Policy; 695 + 696 + fn deref(&self) -> &Self::Target { 697 + self.0 698 + } 699 + } 700 + 701 + impl<'a> DerefMut for PolicyCpu<'a> { 702 + fn deref_mut(&mut self) -> &mut Policy { 703 + self.0 704 + } 705 + } 706 + 707 + impl<'a> Drop for PolicyCpu<'a> { 708 + fn drop(&mut self) { 709 + // SAFETY: The underlying pointer is guaranteed to be valid for the lifetime of `self`. 710 + unsafe { bindings::cpufreq_cpu_put(self.0.as_raw()) }; 711 + } 712 + } 713 + 714 + /// CPU frequency driver. 715 + /// 716 + /// Implement this trait to provide a CPU frequency driver and its callbacks. 
717 + /// 718 + /// Reference: <https://docs.kernel.org/cpu-freq/cpu-drivers.html> 719 + #[vtable] 720 + pub trait Driver { 721 + /// Driver's name. 722 + const NAME: &'static CStr; 723 + 724 + /// Driver's flags. 725 + const FLAGS: u16; 726 + 727 + /// Boost support. 728 + const BOOST_ENABLED: bool; 729 + 730 + /// Policy specific data. 731 + /// 732 + /// Require that `PData` implements `ForeignOwnable`. We guarantee to never move the underlying 733 + /// wrapped data structure. 734 + type PData: ForeignOwnable; 735 + 736 + /// Driver's `init` callback. 737 + fn init(policy: &mut Policy) -> Result<Self::PData>; 738 + 739 + /// Driver's `exit` callback. 740 + fn exit(_policy: &mut Policy, _data: Option<Self::PData>) -> Result { 741 + build_error!(VTABLE_DEFAULT_ERROR) 742 + } 743 + 744 + /// Driver's `online` callback. 745 + fn online(_policy: &mut Policy) -> Result { 746 + build_error!(VTABLE_DEFAULT_ERROR) 747 + } 748 + 749 + /// Driver's `offline` callback. 750 + fn offline(_policy: &mut Policy) -> Result { 751 + build_error!(VTABLE_DEFAULT_ERROR) 752 + } 753 + 754 + /// Driver's `suspend` callback. 755 + fn suspend(_policy: &mut Policy) -> Result { 756 + build_error!(VTABLE_DEFAULT_ERROR) 757 + } 758 + 759 + /// Driver's `resume` callback. 760 + fn resume(_policy: &mut Policy) -> Result { 761 + build_error!(VTABLE_DEFAULT_ERROR) 762 + } 763 + 764 + /// Driver's `ready` callback. 765 + fn ready(_policy: &mut Policy) { 766 + build_error!(VTABLE_DEFAULT_ERROR) 767 + } 768 + 769 + /// Driver's `verify` callback. 770 + fn verify(data: &mut PolicyData) -> Result; 771 + 772 + /// Driver's `setpolicy` callback. 773 + fn setpolicy(_policy: &mut Policy) -> Result { 774 + build_error!(VTABLE_DEFAULT_ERROR) 775 + } 776 + 777 + /// Driver's `target` callback. 778 + fn target(_policy: &mut Policy, _target_freq: u32, _relation: Relation) -> Result { 779 + build_error!(VTABLE_DEFAULT_ERROR) 780 + } 781 + 782 + /// Driver's `target_index` callback. 
783 + fn target_index(_policy: &mut Policy, _index: TableIndex) -> Result { 784 + build_error!(VTABLE_DEFAULT_ERROR) 785 + } 786 + 787 + /// Driver's `fast_switch` callback. 788 + fn fast_switch(_policy: &mut Policy, _target_freq: u32) -> u32 { 789 + build_error!(VTABLE_DEFAULT_ERROR) 790 + } 791 + 792 + /// Driver's `adjust_perf` callback. 793 + fn adjust_perf(_policy: &mut Policy, _min_perf: usize, _target_perf: usize, _capacity: usize) { 794 + build_error!(VTABLE_DEFAULT_ERROR) 795 + } 796 + 797 + /// Driver's `get_intermediate` callback. 798 + fn get_intermediate(_policy: &mut Policy, _index: TableIndex) -> u32 { 799 + build_error!(VTABLE_DEFAULT_ERROR) 800 + } 801 + 802 + /// Driver's `target_intermediate` callback. 803 + fn target_intermediate(_policy: &mut Policy, _index: TableIndex) -> Result { 804 + build_error!(VTABLE_DEFAULT_ERROR) 805 + } 806 + 807 + /// Driver's `get` callback. 808 + fn get(_policy: &mut Policy) -> Result<u32> { 809 + build_error!(VTABLE_DEFAULT_ERROR) 810 + } 811 + 812 + /// Driver's `update_limits` callback. 813 + fn update_limits(_policy: &mut Policy) { 814 + build_error!(VTABLE_DEFAULT_ERROR) 815 + } 816 + 817 + /// Driver's `bios_limit` callback. 818 + fn bios_limit(_policy: &mut Policy, _limit: &mut u32) -> Result { 819 + build_error!(VTABLE_DEFAULT_ERROR) 820 + } 821 + 822 + /// Driver's `set_boost` callback. 823 + fn set_boost(_policy: &mut Policy, _state: i32) -> Result { 824 + build_error!(VTABLE_DEFAULT_ERROR) 825 + } 826 + 827 + /// Driver's `register_em` callback. 828 + fn register_em(_policy: &mut Policy) { 829 + build_error!(VTABLE_DEFAULT_ERROR) 830 + } 831 + } 832 + 833 + /// CPU frequency driver Registration. 834 + /// 835 + /// ## Examples 836 + /// 837 + /// The following example demonstrates how to register a cpufreq driver. 
838 + /// 839 + /// ``` 840 + /// use kernel::{ 841 + /// cpufreq, 842 + /// c_str, 843 + /// device::{Core, Device}, 844 + /// macros::vtable, 845 + /// of, platform, 846 + /// sync::Arc, 847 + /// }; 848 + /// struct SampleDevice; 849 + /// 850 + /// #[derive(Default)] 851 + /// struct SampleDriver; 852 + /// 853 + /// #[vtable] 854 + /// impl cpufreq::Driver for SampleDriver { 855 + /// const NAME: &'static CStr = c_str!("cpufreq-sample"); 856 + /// const FLAGS: u16 = cpufreq::flags::NEED_INITIAL_FREQ_CHECK | cpufreq::flags::IS_COOLING_DEV; 857 + /// const BOOST_ENABLED: bool = true; 858 + /// 859 + /// type PData = Arc<SampleDevice>; 860 + /// 861 + /// fn init(policy: &mut cpufreq::Policy) -> Result<Self::PData> { 862 + /// // Initialize here 863 + /// Ok(Arc::new(SampleDevice, GFP_KERNEL)?) 864 + /// } 865 + /// 866 + /// fn exit(_policy: &mut cpufreq::Policy, _data: Option<Self::PData>) -> Result { 867 + /// Ok(()) 868 + /// } 869 + /// 870 + /// fn suspend(policy: &mut cpufreq::Policy) -> Result { 871 + /// policy.generic_suspend() 872 + /// } 873 + /// 874 + /// fn verify(data: &mut cpufreq::PolicyData) -> Result { 875 + /// data.generic_verify() 876 + /// } 877 + /// 878 + /// fn target_index(policy: &mut cpufreq::Policy, index: cpufreq::TableIndex) -> Result { 879 + /// // Update CPU frequency 880 + /// Ok(()) 881 + /// } 882 + /// 883 + /// fn get(policy: &mut cpufreq::Policy) -> Result<u32> { 884 + /// policy.generic_get() 885 + /// } 886 + /// } 887 + /// 888 + /// impl platform::Driver for SampleDriver { 889 + /// type IdInfo = (); 890 + /// const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None; 891 + /// 892 + /// fn probe( 893 + /// pdev: &platform::Device<Core>, 894 + /// _id_info: Option<&Self::IdInfo>, 895 + /// ) -> Result<Pin<KBox<Self>>> { 896 + /// cpufreq::Registration::<SampleDriver>::new_foreign_owned(pdev.as_ref())?; 897 + /// Ok(KBox::new(Self {}, GFP_KERNEL)?.into()) 898 + /// } 899 + /// } 900 + /// ``` 901 + 
#[repr(transparent)] 902 + pub struct Registration<T: Driver>(KBox<UnsafeCell<bindings::cpufreq_driver>>, PhantomData<T>); 903 + 904 + // SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads 905 + // or CPUs, so it is safe to share it. 906 + unsafe impl<T: Driver> Sync for Registration<T> {} 907 + 908 + #[allow(clippy::non_send_fields_in_send_ty)] 909 + // SAFETY: Registration with and unregistration from the cpufreq subsystem can happen from any 910 + // thread. 911 + unsafe impl<T: Driver> Send for Registration<T> {} 912 + 913 + impl<T: Driver> Registration<T> { 914 + const VTABLE: bindings::cpufreq_driver = bindings::cpufreq_driver { 915 + name: Self::copy_name(T::NAME), 916 + boost_enabled: T::BOOST_ENABLED, 917 + flags: T::FLAGS, 918 + 919 + // Initialize mandatory callbacks. 920 + init: Some(Self::init_callback), 921 + verify: Some(Self::verify_callback), 922 + 923 + // Initialize optional callbacks based on the traits of `T`. 924 + setpolicy: if T::HAS_SETPOLICY { 925 + Some(Self::setpolicy_callback) 926 + } else { 927 + None 928 + }, 929 + target: if T::HAS_TARGET { 930 + Some(Self::target_callback) 931 + } else { 932 + None 933 + }, 934 + target_index: if T::HAS_TARGET_INDEX { 935 + Some(Self::target_index_callback) 936 + } else { 937 + None 938 + }, 939 + fast_switch: if T::HAS_FAST_SWITCH { 940 + Some(Self::fast_switch_callback) 941 + } else { 942 + None 943 + }, 944 + adjust_perf: if T::HAS_ADJUST_PERF { 945 + Some(Self::adjust_perf_callback) 946 + } else { 947 + None 948 + }, 949 + get_intermediate: if T::HAS_GET_INTERMEDIATE { 950 + Some(Self::get_intermediate_callback) 951 + } else { 952 + None 953 + }, 954 + target_intermediate: if T::HAS_TARGET_INTERMEDIATE { 955 + Some(Self::target_intermediate_callback) 956 + } else { 957 + None 958 + }, 959 + get: if T::HAS_GET { 960 + Some(Self::get_callback) 961 + } else { 962 + None 963 + }, 964 + update_limits: if T::HAS_UPDATE_LIMITS { 965 + 
Some(Self::update_limits_callback) 966 + } else { 967 + None 968 + }, 969 + bios_limit: if T::HAS_BIOS_LIMIT { 970 + Some(Self::bios_limit_callback) 971 + } else { 972 + None 973 + }, 974 + online: if T::HAS_ONLINE { 975 + Some(Self::online_callback) 976 + } else { 977 + None 978 + }, 979 + offline: if T::HAS_OFFLINE { 980 + Some(Self::offline_callback) 981 + } else { 982 + None 983 + }, 984 + exit: if T::HAS_EXIT { 985 + Some(Self::exit_callback) 986 + } else { 987 + None 988 + }, 989 + suspend: if T::HAS_SUSPEND { 990 + Some(Self::suspend_callback) 991 + } else { 992 + None 993 + }, 994 + resume: if T::HAS_RESUME { 995 + Some(Self::resume_callback) 996 + } else { 997 + None 998 + }, 999 + ready: if T::HAS_READY { 1000 + Some(Self::ready_callback) 1001 + } else { 1002 + None 1003 + }, 1004 + set_boost: if T::HAS_SET_BOOST { 1005 + Some(Self::set_boost_callback) 1006 + } else { 1007 + None 1008 + }, 1009 + register_em: if T::HAS_REGISTER_EM { 1010 + Some(Self::register_em_callback) 1011 + } else { 1012 + None 1013 + }, 1014 + // SAFETY: All zeros is a valid value for `bindings::cpufreq_driver`. 1015 + ..unsafe { MaybeUninit::zeroed().assume_init() } 1016 + }; 1017 + 1018 + const fn copy_name(name: &'static CStr) -> [c_char; CPUFREQ_NAME_LEN] { 1019 + let src = name.as_bytes_with_nul(); 1020 + let mut dst = [0; CPUFREQ_NAME_LEN]; 1021 + 1022 + build_assert!(src.len() <= CPUFREQ_NAME_LEN); 1023 + 1024 + let mut i = 0; 1025 + while i < src.len() { 1026 + dst[i] = src[i]; 1027 + i += 1; 1028 + } 1029 + 1030 + dst 1031 + } 1032 + 1033 + /// Registers a CPU frequency driver with the cpufreq core. 1034 + pub fn new() -> Result<Self> { 1035 + // We can't use `&Self::VTABLE` directly because the cpufreq core modifies some fields in 1036 + // the C `struct cpufreq_driver`, which requires a mutable reference. 1037 + let mut drv = KBox::new(UnsafeCell::new(Self::VTABLE), GFP_KERNEL)?; 1038 + 1039 + // SAFETY: `drv` is guaranteed to be valid for the lifetime of `Registration`. 
1040 + to_result(unsafe { bindings::cpufreq_register_driver(drv.get_mut()) })?; 1041 + 1042 + Ok(Self(drv, PhantomData)) 1043 + } 1044 + 1045 + /// Same as [`Registration::new`], but does not return a [`Registration`] instance. 1046 + /// 1047 + /// Instead the [`Registration`] is owned by [`Devres`] and will be revoked / dropped, once the 1048 + /// device is detached. 1049 + pub fn new_foreign_owned(dev: &Device<Bound>) -> Result { 1050 + Devres::new_foreign_owned(dev, Self::new()?, GFP_KERNEL) 1051 + } 1052 + } 1053 + 1054 + /// CPU frequency driver callbacks. 1055 + impl<T: Driver> Registration<T> { 1056 + /// Driver's `init` callback. 1057 + /// 1058 + /// SAFETY: Called from C. Inputs must be valid pointers. 1059 + extern "C" fn init_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1060 + from_result(|| { 1061 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1062 + // lifetime of `policy`. 1063 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1064 + 1065 + let data = T::init(policy)?; 1066 + policy.set_data(data)?; 1067 + Ok(0) 1068 + }) 1069 + } 1070 + 1071 + /// Driver's `exit` callback. 1072 + /// 1073 + /// SAFETY: Called from C. Inputs must be valid pointers. 1074 + extern "C" fn exit_callback(ptr: *mut bindings::cpufreq_policy) { 1075 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1076 + // lifetime of `policy`. 1077 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1078 + 1079 + let data = policy.clear_data(); 1080 + let _ = T::exit(policy, data); 1081 + } 1082 + 1083 + /// Driver's `online` callback. 1084 + /// 1085 + /// SAFETY: Called from C. Inputs must be valid pointers. 1086 + extern "C" fn online_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1087 + from_result(|| { 1088 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1089 + // lifetime of `policy`. 
1090 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1091 + T::online(policy).map(|()| 0) 1092 + }) 1093 + } 1094 + 1095 + /// Driver's `offline` callback. 1096 + /// 1097 + /// SAFETY: Called from C. Inputs must be valid pointers. 1098 + extern "C" fn offline_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1099 + from_result(|| { 1100 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1101 + // lifetime of `policy`. 1102 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1103 + T::offline(policy).map(|()| 0) 1104 + }) 1105 + } 1106 + 1107 + /// Driver's `suspend` callback. 1108 + /// 1109 + /// SAFETY: Called from C. Inputs must be valid pointers. 1110 + extern "C" fn suspend_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1111 + from_result(|| { 1112 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1113 + // lifetime of `policy`. 1114 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1115 + T::suspend(policy).map(|()| 0) 1116 + }) 1117 + } 1118 + 1119 + /// Driver's `resume` callback. 1120 + /// 1121 + /// SAFETY: Called from C. Inputs must be valid pointers. 1122 + extern "C" fn resume_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1123 + from_result(|| { 1124 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1125 + // lifetime of `policy`. 1126 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1127 + T::resume(policy).map(|()| 0) 1128 + }) 1129 + } 1130 + 1131 + /// Driver's `ready` callback. 1132 + /// 1133 + /// SAFETY: Called from C. Inputs must be valid pointers. 1134 + extern "C" fn ready_callback(ptr: *mut bindings::cpufreq_policy) { 1135 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1136 + // lifetime of `policy`. 
1137 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1138 + T::ready(policy); 1139 + } 1140 + 1141 + /// Driver's `verify` callback. 1142 + /// 1143 + /// SAFETY: Called from C. Inputs must be valid pointers. 1144 + extern "C" fn verify_callback(ptr: *mut bindings::cpufreq_policy_data) -> kernel::ffi::c_int { 1145 + from_result(|| { 1146 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1147 + // lifetime of `policy`. 1148 + let data = unsafe { PolicyData::from_raw_mut(ptr) }; 1149 + T::verify(data).map(|()| 0) 1150 + }) 1151 + } 1152 + 1153 + /// Driver's `setpolicy` callback. 1154 + /// 1155 + /// SAFETY: Called from C. Inputs must be valid pointers. 1156 + extern "C" fn setpolicy_callback(ptr: *mut bindings::cpufreq_policy) -> kernel::ffi::c_int { 1157 + from_result(|| { 1158 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1159 + // lifetime of `policy`. 1160 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1161 + T::setpolicy(policy).map(|()| 0) 1162 + }) 1163 + } 1164 + 1165 + /// Driver's `target` callback. 1166 + /// 1167 + /// SAFETY: Called from C. Inputs must be valid pointers. 1168 + extern "C" fn target_callback( 1169 + ptr: *mut bindings::cpufreq_policy, 1170 + target_freq: u32, 1171 + relation: u32, 1172 + ) -> kernel::ffi::c_int { 1173 + from_result(|| { 1174 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1175 + // lifetime of `policy`. 1176 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1177 + T::target(policy, target_freq, Relation::new(relation)?).map(|()| 0) 1178 + }) 1179 + } 1180 + 1181 + /// Driver's `target_index` callback. 1182 + /// 1183 + /// SAFETY: Called from C. Inputs must be valid pointers. 
1184 + extern "C" fn target_index_callback( 1185 + ptr: *mut bindings::cpufreq_policy, 1186 + index: u32, 1187 + ) -> kernel::ffi::c_int { 1188 + from_result(|| { 1189 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1190 + // lifetime of `policy`. 1191 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1192 + 1193 + // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the 1194 + // frequency table. 1195 + let index = unsafe { TableIndex::new(index as usize) }; 1196 + 1197 + T::target_index(policy, index).map(|()| 0) 1198 + }) 1199 + } 1200 + 1201 + /// Driver's `fast_switch` callback. 1202 + /// 1203 + /// SAFETY: Called from C. Inputs must be valid pointers. 1204 + extern "C" fn fast_switch_callback( 1205 + ptr: *mut bindings::cpufreq_policy, 1206 + target_freq: u32, 1207 + ) -> kernel::ffi::c_uint { 1208 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1209 + // lifetime of `policy`. 1210 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1211 + T::fast_switch(policy, target_freq) 1212 + } 1213 + 1214 + /// Driver's `adjust_perf` callback. 1215 + extern "C" fn adjust_perf_callback( 1216 + cpu: u32, 1217 + min_perf: usize, 1218 + target_perf: usize, 1219 + capacity: usize, 1220 + ) { 1221 + if let Ok(mut policy) = PolicyCpu::from_cpu(cpu) { 1222 + T::adjust_perf(&mut policy, min_perf, target_perf, capacity); 1223 + } 1224 + } 1225 + 1226 + /// Driver's `get_intermediate` callback. 1227 + /// 1228 + /// SAFETY: Called from C. Inputs must be valid pointers. 1229 + extern "C" fn get_intermediate_callback( 1230 + ptr: *mut bindings::cpufreq_policy, 1231 + index: u32, 1232 + ) -> kernel::ffi::c_uint { 1233 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1234 + // lifetime of `policy`. 
1235 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1236 + 1237 + // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the 1238 + // frequency table. 1239 + let index = unsafe { TableIndex::new(index as usize) }; 1240 + 1241 + T::get_intermediate(policy, index) 1242 + } 1243 + 1244 + /// Driver's `target_intermediate` callback. 1245 + /// 1246 + /// SAFETY: Called from C. Inputs must be valid pointers. 1247 + extern "C" fn target_intermediate_callback( 1248 + ptr: *mut bindings::cpufreq_policy, 1249 + index: u32, 1250 + ) -> kernel::ffi::c_int { 1251 + from_result(|| { 1252 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1253 + // lifetime of `policy`. 1254 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1255 + 1256 + // SAFETY: The C code guarantees that `index` corresponds to a valid entry in the 1257 + // frequency table. 1258 + let index = unsafe { TableIndex::new(index as usize) }; 1259 + 1260 + T::target_intermediate(policy, index).map(|()| 0) 1261 + }) 1262 + } 1263 + 1264 + /// Driver's `get` callback. 1265 + extern "C" fn get_callback(cpu: u32) -> kernel::ffi::c_uint { 1266 + PolicyCpu::from_cpu(cpu).map_or(0, |mut policy| T::get(&mut policy).map_or(0, |f| f)) 1267 + } 1268 + 1269 + /// Driver's `update_limits` callback. 1270 + extern "C" fn update_limits_callback(ptr: *mut bindings::cpufreq_policy) { 1271 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1272 + // lifetime of `policy`. 1273 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1274 + T::update_limits(policy); 1275 + } 1276 + 1277 + /// Driver's `bios_limit` callback. 1278 + /// 1279 + /// SAFETY: Called from C. Inputs must be valid pointers. 
1280 + extern "C" fn bios_limit_callback(cpu: i32, limit: *mut u32) -> kernel::ffi::c_int { 1281 + from_result(|| { 1282 + let mut policy = PolicyCpu::from_cpu(cpu as u32)?; 1283 + 1284 + // SAFETY: `limit` is guaranteed by the C code to be valid. 1285 + T::bios_limit(&mut policy, &mut (unsafe { *limit })).map(|()| 0) 1286 + }) 1287 + } 1288 + 1289 + /// Driver's `set_boost` callback. 1290 + /// 1291 + /// SAFETY: Called from C. Inputs must be valid pointers. 1292 + extern "C" fn set_boost_callback( 1293 + ptr: *mut bindings::cpufreq_policy, 1294 + state: i32, 1295 + ) -> kernel::ffi::c_int { 1296 + from_result(|| { 1297 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1298 + // lifetime of `policy`. 1299 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1300 + T::set_boost(policy, state).map(|()| 0) 1301 + }) 1302 + } 1303 + 1304 + /// Driver's `register_em` callback. 1305 + /// 1306 + /// SAFETY: Called from C. Inputs must be valid pointers. 1307 + extern "C" fn register_em_callback(ptr: *mut bindings::cpufreq_policy) { 1308 + // SAFETY: The `ptr` is guaranteed to be valid by the contract with the C code for the 1309 + // lifetime of `policy`. 1310 + let policy = unsafe { Policy::from_raw_mut(ptr) }; 1311 + T::register_em(policy); 1312 + } 1313 + } 1314 + 1315 + impl<T: Driver> Drop for Registration<T> { 1316 + /// Unregisters with the cpufreq core. 1317 + fn drop(&mut self) { 1318 + // SAFETY: `self.0` is guaranteed to be valid for the lifetime of `Registration`. 1319 + unsafe { bindings::cpufreq_unregister_driver(self.0.get_mut()) }; 1320 + } 1321 + }
+330
rust/kernel/cpumask.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! CPU Mask abstractions. 4 + //! 5 + //! C header: [`include/linux/cpumask.h`](srctree/include/linux/cpumask.h) 6 + 7 + use crate::{ 8 + alloc::{AllocError, Flags}, 9 + prelude::*, 10 + types::Opaque, 11 + }; 12 + 13 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 14 + use core::ptr::{self, NonNull}; 15 + 16 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 17 + use core::mem::MaybeUninit; 18 + 19 + use core::ops::{Deref, DerefMut}; 20 + 21 + /// A CPU Mask. 22 + /// 23 + /// Rust abstraction for the C `struct cpumask`. 24 + /// 25 + /// # Invariants 26 + /// 27 + /// A [`Cpumask`] instance always corresponds to a valid C `struct cpumask`. 28 + /// 29 + /// The callers must ensure that the `struct cpumask` is valid for access and 30 + /// remains valid for the lifetime of the returned reference. 31 + /// 32 + /// ## Examples 33 + /// 34 + /// The following example demonstrates how to update a [`Cpumask`]. 35 + /// 36 + /// ``` 37 + /// use kernel::bindings; 38 + /// use kernel::cpumask::Cpumask; 39 + /// 40 + /// fn set_clear_cpu(ptr: *mut bindings::cpumask, set_cpu: u32, clear_cpu: i32) { 41 + /// // SAFETY: The `ptr` is valid for writing and remains valid for the lifetime of the 42 + /// // returned reference. 43 + /// let mask = unsafe { Cpumask::as_mut_ref(ptr) }; 44 + /// 45 + /// mask.set(set_cpu); 46 + /// mask.clear(clear_cpu); 47 + /// } 48 + /// ``` 49 + #[repr(transparent)] 50 + pub struct Cpumask(Opaque<bindings::cpumask>); 51 + 52 + impl Cpumask { 53 + /// Creates a mutable reference to an existing `struct cpumask` pointer. 54 + /// 55 + /// # Safety 56 + /// 57 + /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime 58 + /// of the returned reference. 59 + pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask) -> &'a mut Self { 60 + // SAFETY: Guaranteed by the safety requirements of the function. 
61 + // 62 + // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the 63 + // lifetime of the returned reference. 64 + unsafe { &mut *ptr.cast() } 65 + } 66 + 67 + /// Creates a reference to an existing `struct cpumask` pointer. 68 + /// 69 + /// # Safety 70 + /// 71 + /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime 72 + /// of the returned reference. 73 + pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask) -> &'a Self { 74 + // SAFETY: Guaranteed by the safety requirements of the function. 75 + // 76 + // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the 77 + // lifetime of the returned reference. 78 + unsafe { &*ptr.cast() } 79 + } 80 + 81 + /// Obtain the raw `struct cpumask` pointer. 82 + pub fn as_raw(&self) -> *mut bindings::cpumask { 83 + let this: *const Self = self; 84 + this.cast_mut().cast() 85 + } 86 + 87 + /// Set `cpu` in the cpumask. 88 + /// 89 + /// ATTENTION: Contrary to C, this Rust `set()` method is non-atomic. 90 + /// This mismatches kernel naming convention and corresponds to the C 91 + /// function `__cpumask_set_cpu()`. 92 + #[inline] 93 + pub fn set(&mut self, cpu: u32) { 94 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `__cpumask_set_cpu`. 95 + unsafe { bindings::__cpumask_set_cpu(cpu, self.as_raw()) }; 96 + } 97 + 98 + /// Clear `cpu` in the cpumask. 99 + /// 100 + /// ATTENTION: Contrary to C, this Rust `clear()` method is non-atomic. 101 + /// This mismatches kernel naming convention and corresponds to the C 102 + /// function `__cpumask_clear_cpu()`. 103 + #[inline] 104 + pub fn clear(&mut self, cpu: i32) { 105 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to 106 + // `__cpumask_clear_cpu`. 107 + unsafe { bindings::__cpumask_clear_cpu(cpu, self.as_raw()) }; 108 + } 109 + 110 + /// Test `cpu` in the cpumask. 
111 + /// 112 + /// Equivalent to the kernel's `cpumask_test_cpu` API. 113 + #[inline] 114 + pub fn test(&self, cpu: i32) -> bool { 115 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_test_cpu`. 116 + unsafe { bindings::cpumask_test_cpu(cpu, self.as_raw()) } 117 + } 118 + 119 + /// Set all CPUs in the cpumask. 120 + /// 121 + /// Equivalent to the kernel's `cpumask_setall` API. 122 + #[inline] 123 + pub fn setall(&mut self) { 124 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_setall`. 125 + unsafe { bindings::cpumask_setall(self.as_raw()) }; 126 + } 127 + 128 + /// Checks if cpumask is empty. 129 + /// 130 + /// Equivalent to the kernel's `cpumask_empty` API. 131 + #[inline] 132 + pub fn empty(&self) -> bool { 133 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_empty`. 134 + unsafe { bindings::cpumask_empty(self.as_raw()) } 135 + } 136 + 137 + /// Checks if cpumask is full. 138 + /// 139 + /// Equivalent to the kernel's `cpumask_full` API. 140 + #[inline] 141 + pub fn full(&self) -> bool { 142 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_full`. 143 + unsafe { bindings::cpumask_full(self.as_raw()) } 144 + } 145 + 146 + /// Get weight of the cpumask. 147 + /// 148 + /// Equivalent to the kernel's `cpumask_weight` API. 149 + #[inline] 150 + pub fn weight(&self) -> u32 { 151 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `cpumask_weight`. 152 + unsafe { bindings::cpumask_weight(self.as_raw()) } 153 + } 154 + 155 + /// Copy cpumask. 156 + /// 157 + /// Equivalent to the kernel's `cpumask_copy` API. 158 + #[inline] 159 + pub fn copy(&self, dstp: &mut Self) { 160 + // SAFETY: By the type invariant, `Self::as_raw` is a valid argument to `cpumask_copy`. 161 + unsafe { bindings::cpumask_copy(dstp.as_raw(), self.as_raw()) }; 162 + } 163 + } 164 + 165 + /// A CPU Mask pointer. 
166 + /// 167 + /// Rust abstraction for the C `struct cpumask_var_t`. 168 + /// 169 + /// # Invariants 170 + /// 171 + /// A [`CpumaskVar`] instance always corresponds to a valid C `struct cpumask_var_t`. 172 + /// 173 + /// The callers must ensure that the `struct cpumask_var_t` is valid for access and remains valid 174 + /// for the lifetime of [`CpumaskVar`]. 175 + /// 176 + /// ## Examples 177 + /// 178 + /// The following example demonstrates how to create and update a [`CpumaskVar`]. 179 + /// 180 + /// ``` 181 + /// use kernel::cpumask::CpumaskVar; 182 + /// 183 + /// let mut mask = CpumaskVar::new_zero(GFP_KERNEL).unwrap(); 184 + /// 185 + /// assert!(mask.empty()); 186 + /// mask.set(2); 187 + /// assert!(mask.test(2)); 188 + /// mask.set(3); 189 + /// assert!(mask.test(3)); 190 + /// assert_eq!(mask.weight(), 2); 191 + /// 192 + /// let mask2 = CpumaskVar::try_clone(&mask).unwrap(); 193 + /// assert!(mask2.test(2)); 194 + /// assert!(mask2.test(3)); 195 + /// assert_eq!(mask2.weight(), 2); 196 + /// ``` 197 + pub struct CpumaskVar { 198 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 199 + ptr: NonNull<Cpumask>, 200 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 201 + mask: Cpumask, 202 + } 203 + 204 + impl CpumaskVar { 205 + /// Creates a zero-initialized instance of the [`CpumaskVar`]. 206 + pub fn new_zero(_flags: Flags) -> Result<Self, AllocError> { 207 + Ok(Self { 208 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 209 + ptr: { 210 + let mut ptr: *mut bindings::cpumask = ptr::null_mut(); 211 + 212 + // SAFETY: It is safe to call this method as the reference to `ptr` is valid. 213 + // 214 + // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of 215 + // scope. 216 + unsafe { bindings::zalloc_cpumask_var(&mut ptr, _flags.as_raw()) }; 217 + NonNull::new(ptr.cast()).ok_or(AllocError)? 218 + }, 219 + 220 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 221 + // SAFETY: FFI type is valid to be zero-initialized. 
222 + // 223 + // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope. 224 + mask: unsafe { core::mem::zeroed() }, 225 + }) 226 + } 227 + 228 + /// Creates an instance of the [`CpumaskVar`]. 229 + /// 230 + /// # Safety 231 + /// 232 + /// The caller must ensure that the returned [`CpumaskVar`] is properly initialized before 233 + /// getting used. 234 + pub unsafe fn new(_flags: Flags) -> Result<Self, AllocError> { 235 + Ok(Self { 236 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 237 + ptr: { 238 + let mut ptr: *mut bindings::cpumask = ptr::null_mut(); 239 + 240 + // SAFETY: It is safe to call this method as the reference to `ptr` is valid. 241 + // 242 + // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of 243 + // scope. 244 + unsafe { bindings::alloc_cpumask_var(&mut ptr, _flags.as_raw()) }; 245 + NonNull::new(ptr.cast()).ok_or(AllocError)? 246 + }, 247 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 248 + // SAFETY: Guaranteed by the safety requirements of the function. 249 + // 250 + // INVARIANT: The associated memory is freed when the `CpumaskVar` goes out of scope. 251 + mask: unsafe { MaybeUninit::uninit().assume_init() }, 252 + }) 253 + } 254 + 255 + /// Creates a mutable reference to an existing `struct cpumask_var_t` pointer. 256 + /// 257 + /// # Safety 258 + /// 259 + /// The caller must ensure that `ptr` is valid for writing and remains valid for the lifetime 260 + /// of the returned reference. 261 + pub unsafe fn as_mut_ref<'a>(ptr: *mut bindings::cpumask_var_t) -> &'a mut Self { 262 + // SAFETY: Guaranteed by the safety requirements of the function. 263 + // 264 + // INVARIANT: The caller ensures that `ptr` is valid for writing and remains valid for the 265 + // lifetime of the returned reference. 266 + unsafe { &mut *ptr.cast() } 267 + } 268 + 269 + /// Creates a reference to an existing `struct cpumask_var_t` pointer. 
270 + /// 271 + /// # Safety 272 + /// 273 + /// The caller must ensure that `ptr` is valid for reading and remains valid for the lifetime 274 + /// of the returned reference. 275 + pub unsafe fn as_ref<'a>(ptr: *const bindings::cpumask_var_t) -> &'a Self { 276 + // SAFETY: Guaranteed by the safety requirements of the function. 277 + // 278 + // INVARIANT: The caller ensures that `ptr` is valid for reading and remains valid for the 279 + // lifetime of the returned reference. 280 + unsafe { &*ptr.cast() } 281 + } 282 + 283 + /// Clones cpumask. 284 + pub fn try_clone(cpumask: &Cpumask) -> Result<Self> { 285 + // SAFETY: The returned cpumask_var is initialized right after this call. 286 + let mut cpumask_var = unsafe { Self::new(GFP_KERNEL) }?; 287 + 288 + cpumask.copy(&mut cpumask_var); 289 + Ok(cpumask_var) 290 + } 291 + } 292 + 293 + // Make [`CpumaskVar`] behave like a pointer to [`Cpumask`]. 294 + impl Deref for CpumaskVar { 295 + type Target = Cpumask; 296 + 297 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 298 + fn deref(&self) -> &Self::Target { 299 + // SAFETY: The caller owns CpumaskVar, so it is safe to deref the cpumask. 300 + unsafe { &*self.ptr.as_ptr() } 301 + } 302 + 303 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 304 + fn deref(&self) -> &Self::Target { 305 + &self.mask 306 + } 307 + } 308 + 309 + impl DerefMut for CpumaskVar { 310 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 311 + fn deref_mut(&mut self) -> &mut Cpumask { 312 + // SAFETY: The caller owns CpumaskVar, so it is safe to deref the cpumask. 313 + unsafe { self.ptr.as_mut() } 314 + } 315 + 316 + #[cfg(not(CONFIG_CPUMASK_OFFSTACK))] 317 + fn deref_mut(&mut self) -> &mut Cpumask { 318 + &mut self.mask 319 + } 320 + } 321 + 322 + impl Drop for CpumaskVar { 323 + fn drop(&mut self) { 324 + #[cfg(CONFIG_CPUMASK_OFFSTACK)] 325 + // SAFETY: By the type invariant, `self.as_raw` is a valid argument to `free_cpumask_var`. 326 + unsafe { 327 + bindings::free_cpumask_var(self.as_raw()) 328 + }; 329 + } 330 + }
+88 -2
rust/kernel/device.rs
··· 9 9 str::CStr, 10 10 types::{ARef, Opaque}, 11 11 }; 12 - use core::{fmt, ptr}; 12 + use core::{fmt, marker::PhantomData, ptr}; 13 13 14 14 #[cfg(CONFIG_PRINTK)] 15 15 use crate::c_str; ··· 42 42 /// `bindings::device::release` is valid to be called from any thread, hence `ARef<Device>` can be 43 43 /// dropped from any thread. 44 44 #[repr(transparent)] 45 - pub struct Device(Opaque<bindings::device>); 45 + pub struct Device<Ctx: DeviceContext = Normal>(Opaque<bindings::device>, PhantomData<Ctx>); 46 46 47 47 impl Device { 48 48 /// Creates a new reference-counted abstraction instance of an existing `struct device` pointer. ··· 59 59 // SAFETY: By the safety requirements ptr is valid 60 60 unsafe { Self::as_ref(ptr) }.into() 61 61 } 62 + } 62 63 64 + impl<Ctx: DeviceContext> Device<Ctx> { 63 65 /// Obtain the raw `struct device *`. 64 66 pub(crate) fn as_raw(&self) -> *mut bindings::device { 65 67 self.0.get() ··· 191 189 } 192 190 } 193 191 192 + // SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic 193 + // argument. 194 + kernel::impl_device_context_deref!(unsafe { Device }); 195 + kernel::impl_device_context_into_aref!(Device); 196 + 194 197 // SAFETY: Instances of `Device` are always reference-counted. 195 198 unsafe impl crate::types::AlwaysRefCounted for Device { 196 199 fn inc_ref(&self) { ··· 232 225 /// any of the bus callbacks, such as `probe()`. 233 226 pub struct Core; 234 227 228 + /// The [`Bound`] context is the context of a bus specific device reference when it is guaranteed to 229 + /// be bound for the duration of its lifetime. 
230 + pub struct Bound; 231 + 235 232 mod private { 236 233 pub trait Sealed {} 237 234 235 + impl Sealed for super::Bound {} 238 236 impl Sealed for super::Core {} 239 237 impl Sealed for super::Normal {} 240 238 } 241 239 240 + impl DeviceContext for Bound {} 242 241 impl DeviceContext for Core {} 243 242 impl DeviceContext for Normal {} 243 + 244 + /// # Safety 245 + /// 246 + /// The type given as `$device` must be a transparent wrapper of a type that doesn't depend on the 247 + /// generic argument of `$device`. 248 + #[doc(hidden)] 249 + #[macro_export] 250 + macro_rules! __impl_device_context_deref { 251 + (unsafe { $device:ident, $src:ty => $dst:ty }) => { 252 + impl ::core::ops::Deref for $device<$src> { 253 + type Target = $device<$dst>; 254 + 255 + fn deref(&self) -> &Self::Target { 256 + let ptr: *const Self = self; 257 + 258 + // CAST: `$device<$src>` and `$device<$dst>` transparently wrap the same type by the 259 + // safety requirement of the macro. 260 + let ptr = ptr.cast::<Self::Target>(); 261 + 262 + // SAFETY: `ptr` was derived from `&self`. 263 + unsafe { &*ptr } 264 + } 265 + } 266 + }; 267 + } 268 + 269 + /// Implement [`core::ops::Deref`] traits for allowed [`DeviceContext`] conversions of a (bus 270 + /// specific) device. 271 + /// 272 + /// # Safety 273 + /// 274 + /// The type given as `$device` must be a transparent wrapper of a type that doesn't depend on the 275 + /// generic argument of `$device`. 276 + #[macro_export] 277 + macro_rules! impl_device_context_deref { 278 + (unsafe { $device:ident }) => { 279 + // SAFETY: This macro has the exact same safety requirement as 280 + // `__impl_device_context_deref!`. 281 + ::kernel::__impl_device_context_deref!(unsafe { 282 + $device, 283 + $crate::device::Core => $crate::device::Bound 284 + }); 285 + 286 + // SAFETY: This macro has the exact same safety requirement as 287 + // `__impl_device_context_deref!`. 
288 + ::kernel::__impl_device_context_deref!(unsafe { 289 + $device, 290 + $crate::device::Bound => $crate::device::Normal 291 + }); 292 + }; 293 + } 294 + 295 + #[doc(hidden)] 296 + #[macro_export] 297 + macro_rules! __impl_device_context_into_aref { 298 + ($src:ty, $device:tt) => { 299 + impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> { 300 + fn from(dev: &$device<$src>) -> Self { 301 + (&**dev).into() 302 + } 303 + } 304 + }; 305 + } 306 + 307 + /// Implement [`core::convert::From`], such that all `&Device<Ctx>` can be converted to an 308 + /// `ARef<Device>`. 309 + #[macro_export] 310 + macro_rules! impl_device_context_into_aref { 311 + ($device:tt) => { 312 + ::kernel::__impl_device_context_into_aref!($crate::device::Core, $device); 313 + ::kernel::__impl_device_context_into_aref!($crate::device::Bound, $device); 314 + }; 315 + } 244 316 245 317 #[doc(hidden)] 246 318 #[macro_export]
+7 -10
rust/kernel/devres.rs
··· 8 8 use crate::{ 9 9 alloc::Flags, 10 10 bindings, 11 - device::Device, 11 + device::{Bound, Device}, 12 12 error::{Error, Result}, 13 13 ffi::c_void, 14 14 prelude::*, ··· 45 45 /// # Example 46 46 /// 47 47 /// ```no_run 48 - /// # use kernel::{bindings, c_str, device::Device, devres::Devres, io::{Io, IoRaw}}; 48 + /// # use kernel::{bindings, c_str, device::{Bound, Device}, devres::Devres, io::{Io, IoRaw}}; 49 49 /// # use core::ops::Deref; 50 50 /// 51 51 /// // See also [`pci::Bar`] for a real example. ··· 83 83 /// unsafe { Io::from_raw(&self.0) } 84 84 /// } 85 85 /// } 86 - /// # fn no_run() -> Result<(), Error> { 87 - /// # // SAFETY: Invalid usage; just for the example to get an `ARef<Device>` instance. 88 - /// # let dev = unsafe { Device::get_device(core::ptr::null_mut()) }; 89 - /// 86 + /// # fn no_run(dev: &Device<Bound>) -> Result<(), Error> { 90 87 /// // SAFETY: Invalid usage for example purposes. 91 88 /// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? }; 92 - /// let devres = Devres::new(&dev, iomem, GFP_KERNEL)?; 89 + /// let devres = Devres::new(dev, iomem, GFP_KERNEL)?; 93 90 /// 94 91 /// let res = devres.try_access().ok_or(ENXIO)?; 95 92 /// res.write8(0x42, 0x0); ··· 96 99 pub struct Devres<T>(Arc<DevresInner<T>>); 97 100 98 101 impl<T> DevresInner<T> { 99 - fn new(dev: &Device, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> { 102 + fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Arc<DevresInner<T>>> { 100 103 let inner = Arc::pin_init( 101 104 pin_init!( DevresInner { 102 105 dev: dev.into(), ··· 168 171 impl<T> Devres<T> { 169 172 /// Creates a new [`Devres`] instance of the given `data`. The `data` encapsulated within the 170 173 /// returned `Devres` instance' `data` will be revoked once the device is detached. 
171 - pub fn new(dev: &Device, data: T, flags: Flags) -> Result<Self> { 174 + pub fn new(dev: &Device<Bound>, data: T, flags: Flags) -> Result<Self> { 172 175 let inner = DevresInner::new(dev, data, flags)?; 173 176 174 177 Ok(Devres(inner)) ··· 176 179 177 180 /// Same as [`Devres::new`], but does not return a `Devres` instance. Instead the given `data` 178 181 /// is owned by devres and will be revoked / dropped, once the device is detached. 179 - pub fn new_foreign_owned(dev: &Device, data: T, flags: Flags) -> Result { 182 + pub fn new_foreign_owned(dev: &Device<Bound>, data: T, flags: Flags) -> Result { 180 183 let _ = DevresInner::new(dev, data, flags)?; 181 184 182 185 Ok(())
+7
rust/kernel/lib.rs
··· 42 42 pub mod block; 43 43 #[doc(hidden)] 44 44 pub mod build_assert; 45 + pub mod clk; 46 + pub mod cpu; 47 + #[cfg(CONFIG_CPU_FREQ)] 48 + pub mod cpufreq; 49 + pub mod cpumask; 45 50 pub mod cred; 46 51 pub mod device; 47 52 pub mod device_id; ··· 69 64 #[cfg(CONFIG_NET)] 70 65 pub mod net; 71 66 pub mod of; 67 + #[cfg(CONFIG_PM_OPP)] 68 + pub mod opp; 72 69 pub mod page; 73 70 #[cfg(CONFIG_PCI)] 74 71 pub mod pci;
+1145
rust/kernel/opp.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Operating performance points. 4 + //! 5 + //! This module provides rust abstractions for interacting with the OPP subsystem. 6 + //! 7 + //! C header: [`include/linux/pm_opp.h`](srctree/include/linux/pm_opp.h) 8 + //! 9 + //! Reference: <https://docs.kernel.org/power/opp.html> 10 + 11 + use crate::{ 12 + clk::Hertz, 13 + cpumask::{Cpumask, CpumaskVar}, 14 + device::Device, 15 + error::{code::*, from_err_ptr, from_result, to_result, Error, Result, VTABLE_DEFAULT_ERROR}, 16 + ffi::c_ulong, 17 + prelude::*, 18 + str::CString, 19 + types::{ARef, AlwaysRefCounted, Opaque}, 20 + }; 21 + 22 + #[cfg(CONFIG_CPU_FREQ)] 23 + /// Frequency table implementation. 24 + mod freq { 25 + use super::*; 26 + use crate::cpufreq; 27 + use core::ops::Deref; 28 + 29 + /// OPP frequency table. 30 + /// 31 + /// A [`cpufreq::Table`] created from [`Table`]. 32 + pub struct FreqTable { 33 + dev: ARef<Device>, 34 + ptr: *mut bindings::cpufreq_frequency_table, 35 + } 36 + 37 + impl FreqTable { 38 + /// Creates a new instance of [`FreqTable`] from [`Table`]. 39 + pub(crate) fn new(table: &Table) -> Result<Self> { 40 + let mut ptr: *mut bindings::cpufreq_frequency_table = ptr::null_mut(); 41 + 42 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 43 + // requirements. 44 + to_result(unsafe { 45 + bindings::dev_pm_opp_init_cpufreq_table(table.dev.as_raw(), &mut ptr) 46 + })?; 47 + 48 + Ok(Self { 49 + dev: table.dev.clone(), 50 + ptr, 51 + }) 52 + } 53 + 54 + /// Returns a reference to the underlying [`cpufreq::Table`]. 55 + #[inline] 56 + fn table(&self) -> &cpufreq::Table { 57 + // SAFETY: The `ptr` is guaranteed by the C code to be valid. 
58 + unsafe { cpufreq::Table::from_raw(self.ptr) } 59 + } 60 + } 61 + 62 + impl Deref for FreqTable { 63 + type Target = cpufreq::Table; 64 + 65 + #[inline] 66 + fn deref(&self) -> &Self::Target { 67 + self.table() 68 + } 69 + } 70 + 71 + impl Drop for FreqTable { 72 + fn drop(&mut self) { 73 + // SAFETY: The pointer was created via `dev_pm_opp_init_cpufreq_table`, and is only 74 + // freed here. 75 + unsafe { 76 + bindings::dev_pm_opp_free_cpufreq_table(self.dev.as_raw(), &mut self.as_raw()) 77 + }; 78 + } 79 + } 80 + } 81 + 82 + #[cfg(CONFIG_CPU_FREQ)] 83 + pub use freq::FreqTable; 84 + 85 + use core::{marker::PhantomData, ptr}; 86 + 87 + use macros::vtable; 88 + 89 + /// Creates a null-terminated slice of pointers to [`CString`]s. 90 + fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> { 91 + // Allocate a null-terminated vector of pointers. 92 + let mut list = KVec::with_capacity(names.len() + 1, GFP_KERNEL)?; 93 + 94 + for name in names.iter() { 95 + list.push(name.as_ptr() as _, GFP_KERNEL)?; 96 + } 97 + 98 + list.push(ptr::null(), GFP_KERNEL)?; 99 + Ok(list) 100 + } 101 + 102 + /// The voltage unit. 103 + /// 104 + /// Represents voltage in microvolts, wrapping a [`c_ulong`] value. 105 + /// 106 + /// ## Examples 107 + /// 108 + /// ``` 109 + /// use kernel::opp::MicroVolt; 110 + /// 111 + /// let raw = 90500; 112 + /// let volt = MicroVolt(raw); 113 + /// 114 + /// assert_eq!(usize::from(volt), raw); 115 + /// assert_eq!(volt, MicroVolt(raw)); 116 + /// ``` 117 + #[derive(Copy, Clone, PartialEq, Eq, Debug)] 118 + pub struct MicroVolt(pub c_ulong); 119 + 120 + impl From<MicroVolt> for c_ulong { 121 + #[inline] 122 + fn from(volt: MicroVolt) -> Self { 123 + volt.0 124 + } 125 + } 126 + 127 + /// The power unit. 128 + /// 129 + /// Represents power in microwatts, wrapping a [`c_ulong`] value.
130 + /// 131 + /// ## Examples 132 + /// 133 + /// ``` 134 + /// use kernel::opp::MicroWatt; 135 + /// 136 + /// let raw = 1000000; 137 + /// let power = MicroWatt(raw); 138 + /// 139 + /// assert_eq!(usize::from(power), raw); 140 + /// assert_eq!(power, MicroWatt(raw)); 141 + /// ``` 142 + #[derive(Copy, Clone, PartialEq, Eq, Debug)] 143 + pub struct MicroWatt(pub c_ulong); 144 + 145 + impl From<MicroWatt> for c_ulong { 146 + #[inline] 147 + fn from(power: MicroWatt) -> Self { 148 + power.0 149 + } 150 + } 151 + 152 + /// Handle for a dynamically created [`OPP`]. 153 + /// 154 + /// The associated [`OPP`] is automatically removed when the [`Token`] is dropped. 155 + /// 156 + /// ## Examples 157 + /// 158 + /// The following example demonstrates how to create an [`OPP`] dynamically. 159 + /// 160 + /// ``` 161 + /// use kernel::clk::Hertz; 162 + /// use kernel::device::Device; 163 + /// use kernel::error::Result; 164 + /// use kernel::opp::{Data, MicroVolt, Token}; 165 + /// use kernel::types::ARef; 166 + /// 167 + /// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> { 168 + /// let data = Data::new(freq, volt, level, false); 169 + /// 170 + /// // OPP is removed once token goes out of scope. 171 + /// data.add_opp(dev) 172 + /// } 173 + /// ``` 174 + pub struct Token { 175 + dev: ARef<Device>, 176 + freq: Hertz, 177 + } 178 + 179 + impl Token { 180 + /// Dynamically adds an [`OPP`] and returns a [`Token`] that removes it on drop. 181 + fn new(dev: &ARef<Device>, mut data: Data) -> Result<Self> { 182 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 183 + // requirements. 
184 + to_result(unsafe { bindings::dev_pm_opp_add_dynamic(dev.as_raw(), &mut data.0) })?; 185 + Ok(Self { 186 + dev: dev.clone(), 187 + freq: data.freq(), 188 + }) 189 + } 190 + } 191 + 192 + impl Drop for Token { 193 + fn drop(&mut self) { 194 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 195 + // requirements. 196 + unsafe { bindings::dev_pm_opp_remove(self.dev.as_raw(), self.freq.into()) }; 197 + } 198 + } 199 + 200 + /// OPP data. 201 + /// 202 + /// Rust abstraction for the C `struct dev_pm_opp_data`, used to define operating performance 203 + /// points (OPPs) dynamically. 204 + /// 205 + /// ## Examples 206 + /// 207 + /// The following example demonstrates how to create an [`OPP`] with [`Data`]. 208 + /// 209 + /// ``` 210 + /// use kernel::clk::Hertz; 211 + /// use kernel::device::Device; 212 + /// use kernel::error::Result; 213 + /// use kernel::opp::{Data, MicroVolt, Token}; 214 + /// use kernel::types::ARef; 215 + /// 216 + /// fn create_opp(dev: &ARef<Device>, freq: Hertz, volt: MicroVolt, level: u32) -> Result<Token> { 217 + /// let data = Data::new(freq, volt, level, false); 218 + /// 219 + /// // OPP is removed once token goes out of scope. 220 + /// data.add_opp(dev) 221 + /// } 222 + /// ``` 223 + #[repr(transparent)] 224 + pub struct Data(bindings::dev_pm_opp_data); 225 + 226 + impl Data { 227 + /// Creates a new instance of [`Data`]. 228 + /// 229 + /// This can be used to define a dynamic OPP to be added to a device. 230 + pub fn new(freq: Hertz, volt: MicroVolt, level: u32, turbo: bool) -> Self { 231 + Self(bindings::dev_pm_opp_data { 232 + turbo, 233 + freq: freq.into(), 234 + u_volt: volt.into(), 235 + level, 236 + }) 237 + } 238 + 239 + /// Adds an [`OPP`] dynamically. 240 + /// 241 + /// Returns a [`Token`] that ensures the OPP is automatically removed 242 + /// when it goes out of scope. 
243 + #[inline] 244 + pub fn add_opp(self, dev: &ARef<Device>) -> Result<Token> { 245 + Token::new(dev, self) 246 + } 247 + 248 + /// Returns the frequency associated with this OPP data. 249 + #[inline] 250 + fn freq(&self) -> Hertz { 251 + Hertz(self.0.freq) 252 + } 253 + } 254 + 255 + /// [`OPP`] search options. 256 + /// 257 + /// ## Examples 258 + /// 259 + /// Defines how to search for an [`OPP`] in a [`Table`] relative to a frequency. 260 + /// 261 + /// ``` 262 + /// use kernel::clk::Hertz; 263 + /// use kernel::error::Result; 264 + /// use kernel::opp::{OPP, SearchType, Table}; 265 + /// use kernel::types::ARef; 266 + /// 267 + /// fn find_opp(table: &Table, freq: Hertz) -> Result<ARef<OPP>> { 268 + /// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?; 269 + /// 270 + /// pr_info!("OPP frequency is: {:?}\n", opp.freq(None)); 271 + /// pr_info!("OPP voltage is: {:?}\n", opp.voltage()); 272 + /// pr_info!("OPP level is: {}\n", opp.level()); 273 + /// pr_info!("OPP power is: {:?}\n", opp.power()); 274 + /// 275 + /// Ok(opp) 276 + /// } 277 + /// ``` 278 + #[derive(Copy, Clone, Debug, Eq, PartialEq)] 279 + pub enum SearchType { 280 + /// Match the exact frequency. 281 + Exact, 282 + /// Find the highest frequency less than or equal to the given value. 283 + Floor, 284 + /// Find the lowest frequency greater than or equal to the given value. 285 + Ceil, 286 + } 287 + 288 + /// OPP configuration callbacks. 289 + /// 290 + /// Implement this trait to customize OPP clock and regulator setup for your device. 291 + #[vtable] 292 + pub trait ConfigOps { 293 + /// This is typically used to scale clocks when transitioning between OPPs. 294 + #[inline] 295 + fn config_clks(_dev: &Device, _table: &Table, _opp: &OPP, _scaling_down: bool) -> Result { 296 + build_error!(VTABLE_DEFAULT_ERROR) 297 + } 298 + 299 + /// This provides access to the old and new OPPs, allowing for safe regulator adjustments. 
300 + #[inline] 301 + fn config_regulators( 302 + _dev: &Device, 303 + _opp_old: &OPP, 304 + _opp_new: &OPP, 305 + _data: *mut *mut bindings::regulator, 306 + _count: u32, 307 + ) -> Result { 308 + build_error!(VTABLE_DEFAULT_ERROR) 309 + } 310 + } 311 + 312 + /// OPP configuration token. 313 + /// 314 + /// Returned by the OPP core when configuration is applied to a [`Device`]. The associated 315 + /// configuration is automatically cleared when the token is dropped. 316 + pub struct ConfigToken(i32); 317 + 318 + impl Drop for ConfigToken { 319 + fn drop(&mut self) { 320 + // SAFETY: This is the same token value returned by the C code via `dev_pm_opp_set_config`. 321 + unsafe { bindings::dev_pm_opp_clear_config(self.0) }; 322 + } 323 + } 324 + 325 + /// OPP configurations. 326 + /// 327 + /// Rust abstraction for the C `struct dev_pm_opp_config`. 328 + /// 329 + /// ## Examples 330 + /// 331 + /// The following example demonstrates how to set OPP property-name configuration for a [`Device`]. 332 + /// 333 + /// ``` 334 + /// use kernel::device::Device; 335 + /// use kernel::error::Result; 336 + /// use kernel::opp::{Config, ConfigOps, ConfigToken}; 337 + /// use kernel::str::CString; 338 + /// use kernel::types::ARef; 339 + /// use kernel::macros::vtable; 340 + /// 341 + /// #[derive(Default)] 342 + /// struct Driver; 343 + /// 344 + /// #[vtable] 345 + /// impl ConfigOps for Driver {} 346 + /// 347 + /// fn configure(dev: &ARef<Device>) -> Result<ConfigToken> { 348 + /// let name = CString::try_from_fmt(fmt!("{}", "slow"))?; 349 + /// 350 + /// // The OPP configuration is cleared once the [`ConfigToken`] goes out of scope. 351 + /// Config::<Driver>::new() 352 + /// .set_prop_name(name)? 
353 + /// .set(dev) 354 + /// } 355 + /// ``` 356 + #[derive(Default)] 357 + pub struct Config<T: ConfigOps> 358 + where 359 + T: Default, 360 + { 361 + clk_names: Option<KVec<CString>>, 362 + prop_name: Option<CString>, 363 + regulator_names: Option<KVec<CString>>, 364 + supported_hw: Option<KVec<u32>>, 365 + 366 + // Tuple containing (required device, index) 367 + required_dev: Option<(ARef<Device>, u32)>, 368 + _data: PhantomData<T>, 369 + } 370 + 371 + impl<T: ConfigOps + Default> Config<T> { 372 + /// Creates a new instance of [`Config`]. 373 + #[inline] 374 + pub fn new() -> Self { 375 + Self::default() 376 + } 377 + 378 + /// Initializes clock names. 379 + pub fn set_clk_names(mut self, names: KVec<CString>) -> Result<Self> { 380 + if self.clk_names.is_some() { 381 + return Err(EBUSY); 382 + } 383 + 384 + if names.is_empty() { 385 + return Err(EINVAL); 386 + } 387 + 388 + self.clk_names = Some(names); 389 + Ok(self) 390 + } 391 + 392 + /// Initializes property name. 393 + pub fn set_prop_name(mut self, name: CString) -> Result<Self> { 394 + if self.prop_name.is_some() { 395 + return Err(EBUSY); 396 + } 397 + 398 + self.prop_name = Some(name); 399 + Ok(self) 400 + } 401 + 402 + /// Initializes regulator names. 403 + pub fn set_regulator_names(mut self, names: KVec<CString>) -> Result<Self> { 404 + if self.regulator_names.is_some() { 405 + return Err(EBUSY); 406 + } 407 + 408 + if names.is_empty() { 409 + return Err(EINVAL); 410 + } 411 + 412 + self.regulator_names = Some(names); 413 + 414 + Ok(self) 415 + } 416 + 417 + /// Initializes required devices. 418 + pub fn set_required_dev(mut self, dev: ARef<Device>, index: u32) -> Result<Self> { 419 + if self.required_dev.is_some() { 420 + return Err(EBUSY); 421 + } 422 + 423 + self.required_dev = Some((dev, index)); 424 + Ok(self) 425 + } 426 + 427 + /// Initializes supported hardware. 
428 + pub fn set_supported_hw(mut self, hw: KVec<u32>) -> Result<Self> { 429 + if self.supported_hw.is_some() { 430 + return Err(EBUSY); 431 + } 432 + 433 + if hw.is_empty() { 434 + return Err(EINVAL); 435 + } 436 + 437 + self.supported_hw = Some(hw); 438 + Ok(self) 439 + } 440 + 441 + /// Sets the configuration with the OPP core. 442 + /// 443 + /// The returned [`ConfigToken`] will remove the configuration when dropped. 444 + pub fn set(self, dev: &Device) -> Result<ConfigToken> { 445 + let (_clk_list, clk_names) = match &self.clk_names { 446 + Some(x) => { 447 + let list = to_c_str_array(x)?; 448 + let ptr = list.as_ptr(); 449 + (Some(list), ptr) 450 + } 451 + None => (None, ptr::null()), 452 + }; 453 + 454 + let (_regulator_list, regulator_names) = match &self.regulator_names { 455 + Some(x) => { 456 + let list = to_c_str_array(x)?; 457 + let ptr = list.as_ptr(); 458 + (Some(list), ptr) 459 + } 460 + None => (None, ptr::null()), 461 + }; 462 + 463 + let prop_name = self 464 + .prop_name 465 + .as_ref() 466 + .map_or(ptr::null(), |p| p.as_char_ptr()); 467 + 468 + let (supported_hw, supported_hw_count) = self 469 + .supported_hw 470 + .as_ref() 471 + .map_or((ptr::null(), 0), |hw| (hw.as_ptr(), hw.len() as u32)); 472 + 473 + let (required_dev, required_dev_index) = self 474 + .required_dev 475 + .as_ref() 476 + .map_or((ptr::null_mut(), 0), |(dev, idx)| (dev.as_raw(), *idx)); 477 + 478 + let mut config = bindings::dev_pm_opp_config { 479 + clk_names, 480 + config_clks: if T::HAS_CONFIG_CLKS { 481 + Some(Self::config_clks) 482 + } else { 483 + None 484 + }, 485 + prop_name, 486 + regulator_names, 487 + config_regulators: if T::HAS_CONFIG_REGULATORS { 488 + Some(Self::config_regulators) 489 + } else { 490 + None 491 + }, 492 + supported_hw, 493 + supported_hw_count, 494 + 495 + required_dev, 496 + required_dev_index, 497 + }; 498 + 499 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 500 + // requirements. 
The OPP core guarantees not to access fields of [`Config`] after this call 501 + // and so we don't need to save a copy of them for future use. 502 + let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) }; 503 + if ret < 0 { 504 + Err(Error::from_errno(ret)) 505 + } else { 506 + Ok(ConfigToken(ret)) 507 + } 508 + } 509 + 510 + /// Config's clk callback. 511 + /// 512 + /// SAFETY: Called from C. Inputs must be valid pointers. 513 + extern "C" fn config_clks( 514 + dev: *mut bindings::device, 515 + opp_table: *mut bindings::opp_table, 516 + opp: *mut bindings::dev_pm_opp, 517 + _data: *mut kernel::ffi::c_void, 518 + scaling_down: bool, 519 + ) -> kernel::ffi::c_int { 520 + from_result(|| { 521 + // SAFETY: 'dev' is guaranteed by the C code to be valid. 522 + let dev = unsafe { Device::get_device(dev) }; 523 + T::config_clks( 524 + &dev, 525 + // SAFETY: 'opp_table' is guaranteed by the C code to be valid. 526 + &unsafe { Table::from_raw_table(opp_table, &dev) }, 527 + // SAFETY: 'opp' is guaranteed by the C code to be valid. 528 + unsafe { OPP::from_raw_opp(opp)? }, 529 + scaling_down, 530 + ) 531 + .map(|()| 0) 532 + }) 533 + } 534 + 535 + /// Config's regulator callback. 536 + /// 537 + /// SAFETY: Called from C. Inputs must be valid pointers. 538 + extern "C" fn config_regulators( 539 + dev: *mut bindings::device, 540 + old_opp: *mut bindings::dev_pm_opp, 541 + new_opp: *mut bindings::dev_pm_opp, 542 + regulators: *mut *mut bindings::regulator, 543 + count: kernel::ffi::c_uint, 544 + ) -> kernel::ffi::c_int { 545 + from_result(|| { 546 + // SAFETY: 'dev' is guaranteed by the C code to be valid. 547 + let dev = unsafe { Device::get_device(dev) }; 548 + T::config_regulators( 549 + &dev, 550 + // SAFETY: 'old_opp' is guaranteed by the C code to be valid. 551 + unsafe { OPP::from_raw_opp(old_opp)? }, 552 + // SAFETY: 'new_opp' is guaranteed by the C code to be valid. 553 + unsafe { OPP::from_raw_opp(new_opp)? 
}, 554 + regulators, 555 + count, 556 + ) 557 + .map(|()| 0) 558 + }) 559 + } 560 + } 561 + 562 + /// A reference-counted OPP table. 563 + /// 564 + /// Rust abstraction for the C `struct opp_table`. 565 + /// 566 + /// # Invariants 567 + /// 568 + /// The pointer stored in `Self` is non-null and valid for the lifetime of the [`Table`]. 569 + /// 570 + /// Instances of this type are reference-counted. 571 + /// 572 + /// ## Examples 573 + /// 574 + /// The following example demonstrates how to get OPP [`Table`] for a [`Cpumask`] and set its 575 + /// frequency. 576 + /// 577 + /// ``` 578 + /// use kernel::clk::Hertz; 579 + /// use kernel::cpumask::Cpumask; 580 + /// use kernel::device::Device; 581 + /// use kernel::error::Result; 582 + /// use kernel::opp::Table; 583 + /// use kernel::types::ARef; 584 + /// 585 + /// fn get_table(dev: &ARef<Device>, mask: &mut Cpumask, freq: Hertz) -> Result<Table> { 586 + /// let mut opp_table = Table::from_of_cpumask(dev, mask)?; 587 + /// 588 + /// if opp_table.opp_count()? == 0 { 589 + /// return Err(EINVAL); 590 + /// } 591 + /// 592 + /// pr_info!("Max transition latency is: {} ns\n", opp_table.max_transition_latency_ns()); 593 + /// pr_info!("Suspend frequency is: {:?}\n", opp_table.suspend_freq()); 594 + /// 595 + /// opp_table.set_rate(freq)?; 596 + /// Ok(opp_table) 597 + /// } 598 + /// ``` 599 + pub struct Table { 600 + ptr: *mut bindings::opp_table, 601 + dev: ARef<Device>, 602 + #[allow(dead_code)] 603 + em: bool, 604 + #[allow(dead_code)] 605 + of: bool, 606 + cpus: Option<CpumaskVar>, 607 + } 608 + 609 + /// SAFETY: It is okay to send ownership of [`Table`] across thread boundaries. 610 + unsafe impl Send for Table {} 611 + 612 + /// SAFETY: It is okay to access [`Table`] through shared references from other threads because 613 + /// we're either accessing properties that don't change or that are properly synchronised by C code. 
614 + unsafe impl Sync for Table {} 615 + 616 + impl Table { 617 + /// Creates a new reference-counted [`Table`] from a raw pointer. 618 + /// 619 + /// # Safety 620 + /// 621 + /// Callers must ensure that `ptr` is valid and non-null. 622 + unsafe fn from_raw_table(ptr: *mut bindings::opp_table, dev: &ARef<Device>) -> Self { 623 + // SAFETY: By the safety requirements, ptr is valid and its refcount will be incremented. 624 + // 625 + // INVARIANT: The reference-count is decremented when [`Table`] goes out of scope. 626 + unsafe { bindings::dev_pm_opp_get_opp_table_ref(ptr) }; 627 + 628 + Self { 629 + ptr, 630 + dev: dev.clone(), 631 + em: false, 632 + of: false, 633 + cpus: None, 634 + } 635 + } 636 + 637 + /// Creates a new reference-counted [`Table`] instance for a [`Device`]. 638 + pub fn from_dev(dev: &Device) -> Result<Self> { 639 + // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety 640 + // requirements. 641 + // 642 + // INVARIANT: The reference-count is incremented by the C code and is decremented when 643 + // [`Table`] goes out of scope. 644 + let ptr = from_err_ptr(unsafe { bindings::dev_pm_opp_get_opp_table(dev.as_raw()) })?; 645 + 646 + Ok(Self { 647 + ptr, 648 + dev: dev.into(), 649 + em: false, 650 + of: false, 651 + cpus: None, 652 + }) 653 + } 654 + 655 + /// Creates a new reference-counted [`Table`] instance for a [`Device`] based on device tree 656 + /// entries. 657 + #[cfg(CONFIG_OF)] 658 + pub fn from_of(dev: &ARef<Device>, index: i32) -> Result<Self> { 659 + // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety 660 + // requirements. 661 + // 662 + // INVARIANT: The reference-count is incremented by the C code and is decremented when 663 + // [`Table`] goes out of scope. 664 + to_result(unsafe { bindings::dev_pm_opp_of_add_table_indexed(dev.as_raw(), index) })?; 665 + 666 + // Get the newly created [`Table`]. 
667 + let mut table = Self::from_dev(dev)?; 668 + table.of = true; 669 + 670 + Ok(table) 671 + } 672 + 673 + /// Remove device tree based [`Table`]. 674 + #[cfg(CONFIG_OF)] 675 + #[inline] 676 + fn remove_of(&self) { 677 + // SAFETY: The requirements are satisfied by the existence of the [`Device`] and its safety 678 + // requirements. We took the reference from [`from_of`] earlier, it is safe to drop the 679 + // same now. 680 + unsafe { bindings::dev_pm_opp_of_remove_table(self.dev.as_raw()) }; 681 + } 682 + 683 + /// Creates a new reference-counted [`Table`] instance for a [`Cpumask`] based on device tree 684 + /// entries. 685 + #[cfg(CONFIG_OF)] 686 + pub fn from_of_cpumask(dev: &Device, cpumask: &mut Cpumask) -> Result<Self> { 687 + // SAFETY: The cpumask is valid and the returned pointer will be owned by the [`Table`] 688 + // instance. 689 + // 690 + // INVARIANT: The reference-count is incremented by the C code and is decremented when 691 + // [`Table`] goes out of scope. 692 + to_result(unsafe { bindings::dev_pm_opp_of_cpumask_add_table(cpumask.as_raw()) })?; 693 + 694 + // Fetch the newly created table. 695 + let mut table = Self::from_dev(dev)?; 696 + table.cpus = Some(CpumaskVar::try_clone(cpumask)?); 697 + 698 + Ok(table) 699 + } 700 + 701 + /// Remove device tree based [`Table`] for a [`Cpumask`]. 702 + #[cfg(CONFIG_OF)] 703 + #[inline] 704 + fn remove_of_cpumask(&self, cpumask: &Cpumask) { 705 + // SAFETY: The cpumask is valid and we took the reference from [`from_of_cpumask`] earlier, 706 + // it is safe to drop the same now. 707 + unsafe { bindings::dev_pm_opp_of_cpumask_remove_table(cpumask.as_raw()) }; 708 + } 709 + 710 + /// Returns the number of [`OPP`]s in the [`Table`]. 711 + pub fn opp_count(&self) -> Result<u32> { 712 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 713 + // requirements. 
714 + let ret = unsafe { bindings::dev_pm_opp_get_opp_count(self.dev.as_raw()) }; 715 + if ret < 0 { 716 + Err(Error::from_errno(ret)) 717 + } else { 718 + Ok(ret as u32) 719 + } 720 + } 721 + 722 + /// Returns max clock latency (in nanoseconds) of the [`OPP`]s in the [`Table`]. 723 + #[inline] 724 + pub fn max_clock_latency_ns(&self) -> usize { 725 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 726 + // requirements. 727 + unsafe { bindings::dev_pm_opp_get_max_clock_latency(self.dev.as_raw()) } 728 + } 729 + 730 + /// Returns max volt latency (in nanoseconds) of the [`OPP`]s in the [`Table`]. 731 + #[inline] 732 + pub fn max_volt_latency_ns(&self) -> usize { 733 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 734 + // requirements. 735 + unsafe { bindings::dev_pm_opp_get_max_volt_latency(self.dev.as_raw()) } 736 + } 737 + 738 + /// Returns max transition latency (in nanoseconds) of the [`OPP`]s in the [`Table`]. 739 + #[inline] 740 + pub fn max_transition_latency_ns(&self) -> usize { 741 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 742 + // requirements. 743 + unsafe { bindings::dev_pm_opp_get_max_transition_latency(self.dev.as_raw()) } 744 + } 745 + 746 + /// Returns the suspend [`OPP`]'s frequency. 747 + #[inline] 748 + pub fn suspend_freq(&self) -> Hertz { 749 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 750 + // requirements. 751 + Hertz(unsafe { bindings::dev_pm_opp_get_suspend_opp_freq(self.dev.as_raw()) }) 752 + } 753 + 754 + /// Synchronizes regulators used by the [`Table`]. 755 + #[inline] 756 + pub fn sync_regulators(&self) -> Result { 757 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 758 + // requirements. 759 + to_result(unsafe { bindings::dev_pm_opp_sync_regulators(self.dev.as_raw()) }) 760 + } 761 + 762 + /// Gets sharing CPUs. 
763 + #[inline] 764 + pub fn sharing_cpus(dev: &Device, cpumask: &mut Cpumask) -> Result { 765 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 766 + // requirements. 767 + to_result(unsafe { bindings::dev_pm_opp_get_sharing_cpus(dev.as_raw(), cpumask.as_raw()) }) 768 + } 769 + 770 + /// Sets sharing CPUs. 771 + pub fn set_sharing_cpus(&mut self, cpumask: &mut Cpumask) -> Result { 772 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 773 + // requirements. 774 + to_result(unsafe { 775 + bindings::dev_pm_opp_set_sharing_cpus(self.dev.as_raw(), cpumask.as_raw()) 776 + })?; 777 + 778 + if let Some(mask) = self.cpus.as_mut() { 779 + // Update the cpumask as this will be used while removing the table. 780 + cpumask.copy(mask); 781 + } 782 + 783 + Ok(()) 784 + } 785 + 786 + /// Gets sharing CPUs from device tree. 787 + #[cfg(CONFIG_OF)] 788 + #[inline] 789 + pub fn of_sharing_cpus(dev: &Device, cpumask: &mut Cpumask) -> Result { 790 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 791 + // requirements. 792 + to_result(unsafe { 793 + bindings::dev_pm_opp_of_get_sharing_cpus(dev.as_raw(), cpumask.as_raw()) 794 + }) 795 + } 796 + 797 + /// Updates the voltage value for an [`OPP`]. 798 + #[inline] 799 + pub fn adjust_voltage( 800 + &self, 801 + freq: Hertz, 802 + volt: MicroVolt, 803 + volt_min: MicroVolt, 804 + volt_max: MicroVolt, 805 + ) -> Result { 806 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 807 + // requirements. 808 + to_result(unsafe { 809 + bindings::dev_pm_opp_adjust_voltage( 810 + self.dev.as_raw(), 811 + freq.into(), 812 + volt.into(), 813 + volt_min.into(), 814 + volt_max.into(), 815 + ) 816 + }) 817 + } 818 + 819 + /// Creates [`FreqTable`] from [`Table`]. 
820 + #[cfg(CONFIG_CPU_FREQ)] 821 + #[inline] 822 + pub fn cpufreq_table(&mut self) -> Result<FreqTable> { 823 + FreqTable::new(self) 824 + } 825 + 826 + /// Configures device with [`OPP`] matching the frequency value. 827 + #[inline] 828 + pub fn set_rate(&self, freq: Hertz) -> Result { 829 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 830 + // requirements. 831 + to_result(unsafe { bindings::dev_pm_opp_set_rate(self.dev.as_raw(), freq.into()) }) 832 + } 833 + 834 + /// Configures device with [`OPP`]. 835 + #[inline] 836 + pub fn set_opp(&self, opp: &OPP) -> Result { 837 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 838 + // requirements. 839 + to_result(unsafe { bindings::dev_pm_opp_set_opp(self.dev.as_raw(), opp.as_raw()) }) 840 + } 841 + 842 + /// Finds [`OPP`] based on frequency. 843 + pub fn opp_from_freq( 844 + &self, 845 + freq: Hertz, 846 + available: Option<bool>, 847 + index: Option<u32>, 848 + stype: SearchType, 849 + ) -> Result<ARef<OPP>> { 850 + let raw_dev = self.dev.as_raw(); 851 + let index = index.unwrap_or(0); 852 + let mut rate = freq.into(); 853 + 854 + let ptr = from_err_ptr(match stype { 855 + SearchType::Exact => { 856 + if let Some(available) = available { 857 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and 858 + // its safety requirements. The returned pointer will be owned by the new 859 + // [`OPP`] instance. 860 + unsafe { 861 + bindings::dev_pm_opp_find_freq_exact_indexed( 862 + raw_dev, rate, index, available, 863 + ) 864 + } 865 + } else { 866 + return Err(EINVAL); 867 + } 868 + } 869 + 870 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 871 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 
872 + SearchType::Ceil => unsafe { 873 + bindings::dev_pm_opp_find_freq_ceil_indexed(raw_dev, &mut rate, index) 874 + }, 875 + 876 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 877 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 878 + SearchType::Floor => unsafe { 879 + bindings::dev_pm_opp_find_freq_floor_indexed(raw_dev, &mut rate, index) 880 + }, 881 + })?; 882 + 883 + // SAFETY: The `ptr` is guaranteed by the C code to be valid. 884 + unsafe { OPP::from_raw_opp_owned(ptr) } 885 + } 886 + 887 + /// Finds [`OPP`] based on level. 888 + pub fn opp_from_level(&self, mut level: u32, stype: SearchType) -> Result<ARef<OPP>> { 889 + let raw_dev = self.dev.as_raw(); 890 + 891 + let ptr = from_err_ptr(match stype { 892 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 893 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 894 + SearchType::Exact => unsafe { bindings::dev_pm_opp_find_level_exact(raw_dev, level) }, 895 + 896 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 897 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 898 + SearchType::Ceil => unsafe { 899 + bindings::dev_pm_opp_find_level_ceil(raw_dev, &mut level) 900 + }, 901 + 902 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 903 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 904 + SearchType::Floor => unsafe { 905 + bindings::dev_pm_opp_find_level_floor(raw_dev, &mut level) 906 + }, 907 + })?; 908 + 909 + // SAFETY: The `ptr` is guaranteed by the C code to be valid. 910 + unsafe { OPP::from_raw_opp_owned(ptr) } 911 + } 912 + 913 + /// Finds [`OPP`] based on bandwidth. 
914 + pub fn opp_from_bw(&self, mut bw: u32, index: i32, stype: SearchType) -> Result<ARef<OPP>> { 915 + let raw_dev = self.dev.as_raw(); 916 + 917 + let ptr = from_err_ptr(match stype { 918 + // The OPP core doesn't support this yet. 919 + SearchType::Exact => return Err(EINVAL), 920 + 921 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 922 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 923 + SearchType::Ceil => unsafe { 924 + bindings::dev_pm_opp_find_bw_ceil(raw_dev, &mut bw, index) 925 + }, 926 + 927 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 928 + // requirements. The returned pointer will be owned by the new [`OPP`] instance. 929 + SearchType::Floor => unsafe { 930 + bindings::dev_pm_opp_find_bw_floor(raw_dev, &mut bw, index) 931 + }, 932 + })?; 933 + 934 + // SAFETY: The `ptr` is guaranteed by the C code to be valid. 935 + unsafe { OPP::from_raw_opp_owned(ptr) } 936 + } 937 + 938 + /// Enables the [`OPP`]. 939 + #[inline] 940 + pub fn enable_opp(&self, freq: Hertz) -> Result { 941 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 942 + // requirements. 943 + to_result(unsafe { bindings::dev_pm_opp_enable(self.dev.as_raw(), freq.into()) }) 944 + } 945 + 946 + /// Disables the [`OPP`]. 947 + #[inline] 948 + pub fn disable_opp(&self, freq: Hertz) -> Result { 949 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 950 + // requirements. 951 + to_result(unsafe { bindings::dev_pm_opp_disable(self.dev.as_raw(), freq.into()) }) 952 + } 953 + 954 + /// Registers with the Energy model. 955 + #[cfg(CONFIG_OF)] 956 + pub fn of_register_em(&mut self, cpumask: &mut Cpumask) -> Result { 957 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 958 + // requirements. 
959 + to_result(unsafe { 960 + bindings::dev_pm_opp_of_register_em(self.dev.as_raw(), cpumask.as_raw()) 961 + })?; 962 + 963 + self.em = true; 964 + Ok(()) 965 + } 966 + 967 + /// Unregisters with the Energy model. 968 + #[cfg(all(CONFIG_OF, CONFIG_ENERGY_MODEL))] 969 + #[inline] 970 + fn of_unregister_em(&self) { 971 + // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety 972 + // requirements. We registered with the EM framework earlier, it is safe to unregister now. 973 + unsafe { bindings::em_dev_unregister_perf_domain(self.dev.as_raw()) }; 974 + } 975 + } 976 + 977 + impl Drop for Table { 978 + fn drop(&mut self) { 979 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe 980 + // to relinquish it now. 981 + unsafe { bindings::dev_pm_opp_put_opp_table(self.ptr) }; 982 + 983 + #[cfg(CONFIG_OF)] 984 + { 985 + #[cfg(CONFIG_ENERGY_MODEL)] 986 + if self.em { 987 + self.of_unregister_em(); 988 + } 989 + 990 + if self.of { 991 + self.remove_of(); 992 + } else if let Some(cpumask) = self.cpus.take() { 993 + self.remove_of_cpumask(&cpumask); 994 + } 995 + } 996 + } 997 + } 998 + 999 + /// A reference-counted Operating performance point (OPP). 1000 + /// 1001 + /// Rust abstraction for the C `struct dev_pm_opp`. 1002 + /// 1003 + /// # Invariants 1004 + /// 1005 + /// The pointer stored in `Self` is non-null and valid for the lifetime of the [`OPP`]. 1006 + /// 1007 + /// Instances of this type are reference-counted. The reference count is incremented by the 1008 + /// `dev_pm_opp_get` function and decremented by `dev_pm_opp_put`. The Rust type `ARef<OPP>` 1009 + /// represents a pointer that owns a reference count on the [`OPP`]. 1010 + /// 1011 + /// A reference to the [`OPP`], &[`OPP`], isn't refcounted by the Rust code. 
1012 + /// 1013 + /// ## Examples 1014 + /// 1015 + /// The following example demonstrates how to get [`OPP`] corresponding to a frequency value and 1016 + /// configure the device with it. 1017 + /// 1018 + /// ``` 1019 + /// use kernel::clk::Hertz; 1020 + /// use kernel::error::Result; 1021 + /// use kernel::opp::{SearchType, Table}; 1022 + /// 1023 + /// fn configure_opp(table: &Table, freq: Hertz) -> Result { 1024 + /// let opp = table.opp_from_freq(freq, Some(true), None, SearchType::Exact)?; 1025 + /// 1026 + /// if opp.freq(None) != freq { 1027 + /// return Err(EINVAL); 1028 + /// } 1029 + /// 1030 + /// table.set_opp(&opp) 1031 + /// } 1032 + /// ``` 1033 + #[repr(transparent)] 1034 + pub struct OPP(Opaque<bindings::dev_pm_opp>); 1035 + 1036 + /// SAFETY: It is okay to send the ownership of [`OPP`] across thread boundaries. 1037 + unsafe impl Send for OPP {} 1038 + 1039 + /// SAFETY: It is okay to access [`OPP`] through shared references from other threads because we're 1040 + /// either accessing properties that don't change or that are properly synchronised by C code. 1041 + unsafe impl Sync for OPP {} 1042 + 1043 + /// SAFETY: The type invariants guarantee that [`OPP`] is always refcounted. 1044 + unsafe impl AlwaysRefCounted for OPP { 1045 + fn inc_ref(&self) { 1046 + // SAFETY: The existence of a shared reference means that the refcount is nonzero. 1047 + unsafe { bindings::dev_pm_opp_get(self.0.get()) }; 1048 + } 1049 + 1050 + unsafe fn dec_ref(obj: ptr::NonNull<Self>) { 1051 + // SAFETY: The safety requirements guarantee that the refcount is nonzero. 1052 + unsafe { bindings::dev_pm_opp_put(obj.cast().as_ptr()) } 1053 + } 1054 + } 1055 + 1056 + impl OPP { 1057 + /// Creates an owned reference to a [`OPP`] from a valid pointer. 1058 + /// 1059 + /// The refcount is incremented by the C code and will be decremented by `dec_ref` when the 1060 + /// [`ARef`] object is dropped. 
1061 + /// 1062 + /// # Safety 1063 + /// 1064 + /// The caller must ensure that `ptr` is valid and the refcount of the [`OPP`] is incremented. 1065 + /// The caller must also ensure that it doesn't explicitly drop the refcount of the [`OPP`], as 1066 + /// the returned [`ARef`] object takes over the refcount increment on the underlying object and 1067 + /// the same will be dropped along with it. 1068 + pub unsafe fn from_raw_opp_owned(ptr: *mut bindings::dev_pm_opp) -> Result<ARef<Self>> { 1069 + let ptr = ptr::NonNull::new(ptr).ok_or(ENODEV)?; 1070 + 1071 + // SAFETY: The safety requirements guarantee the validity of the pointer. 1072 + // 1073 + // INVARIANT: The reference-count is decremented when [`OPP`] goes out of scope. 1074 + Ok(unsafe { ARef::from_raw(ptr.cast()) }) 1075 + } 1076 + 1077 + /// Creates a reference to a [`OPP`] from a valid pointer. 1078 + /// 1079 + /// The refcount is not updated by the Rust API unless the returned reference is converted to 1080 + /// an [`ARef`] object. 1081 + /// 1082 + /// # Safety 1083 + /// 1084 + /// The caller must ensure that `ptr` is valid and remains valid for the duration of `'a`. 1085 + #[inline] 1086 + pub unsafe fn from_raw_opp<'a>(ptr: *mut bindings::dev_pm_opp) -> Result<&'a Self> { 1087 + // SAFETY: The caller guarantees that the pointer is not dangling and stays valid for the 1088 + // duration of 'a. The cast is okay because [`OPP`] is `repr(transparent)`. 1089 + Ok(unsafe { &*ptr.cast() }) 1090 + } 1091 + 1092 + #[inline] 1093 + fn as_raw(&self) -> *mut bindings::dev_pm_opp { 1094 + self.0.get() 1095 + } 1096 + 1097 + /// Returns the frequency of an [`OPP`]. 1098 + pub fn freq(&self, index: Option<u32>) -> Hertz { 1099 + let index = index.unwrap_or(0); 1100 + 1101 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1102 + // use it. 
1103 + Hertz(unsafe { bindings::dev_pm_opp_get_freq_indexed(self.as_raw(), index) }) 1104 + } 1105 + 1106 + /// Returns the voltage of an [`OPP`]. 1107 + #[inline] 1108 + pub fn voltage(&self) -> MicroVolt { 1109 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1110 + // use it. 1111 + MicroVolt(unsafe { bindings::dev_pm_opp_get_voltage(self.as_raw()) }) 1112 + } 1113 + 1114 + /// Returns the level of an [`OPP`]. 1115 + #[inline] 1116 + pub fn level(&self) -> u32 { 1117 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1118 + // use it. 1119 + unsafe { bindings::dev_pm_opp_get_level(self.as_raw()) } 1120 + } 1121 + 1122 + /// Returns the power of an [`OPP`]. 1123 + #[inline] 1124 + pub fn power(&self) -> MicroWatt { 1125 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1126 + // use it. 1127 + MicroWatt(unsafe { bindings::dev_pm_opp_get_power(self.as_raw()) }) 1128 + } 1129 + 1130 + /// Returns the required pstate of an [`OPP`]. 1131 + #[inline] 1132 + pub fn required_pstate(&self, index: u32) -> u32 { 1133 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1134 + // use it. 1135 + unsafe { bindings::dev_pm_opp_get_required_pstate(self.as_raw(), index) } 1136 + } 1137 + 1138 + /// Returns true if the [`OPP`] is turbo. 1139 + #[inline] 1140 + pub fn is_turbo(&self) -> bool { 1141 + // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to 1142 + // use it. 1143 + unsafe { bindings::dev_pm_opp_is_turbo(self.as_raw()) } 1144 + } 1145 + }
+11 -22
rust/kernel/pci.rs
··· 360 360 } 361 361 } 362 362 363 - impl Device { 363 + impl<Ctx: device::DeviceContext> Device<Ctx> { 364 364 fn as_raw(&self) -> *mut bindings::pci_dev { 365 365 self.0.get() 366 366 } 367 + } 367 368 369 + impl Device { 368 370 /// Returns the PCI vendor ID. 369 371 pub fn vendor_id(&self) -> u16 { 370 372 // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`. ··· 390 388 // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`. 391 389 Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) }) 392 390 } 391 + } 393 392 394 + impl Device<device::Bound> { 394 394 /// Maps an entire PCI-BAR after performing a region-request on it. I/O operation bound checks 395 395 /// can be performed at compile time for offsets (plus the requested type size) < SIZE. 396 396 pub fn iomap_region_sized<const SIZE: usize>( ··· 426 422 } 427 423 } 428 424 429 - impl Deref for Device<device::Core> { 430 - type Target = Device; 431 - 432 - fn deref(&self) -> &Self::Target { 433 - let ptr: *const Self = self; 434 - 435 - // CAST: `Device<Ctx>` is a transparent wrapper of `Opaque<bindings::pci_dev>`. 436 - let ptr = ptr.cast::<Device>(); 437 - 438 - // SAFETY: `ptr` was derived from `&self`. 439 - unsafe { &*ptr } 440 - } 441 - } 442 - 443 - impl From<&Device<device::Core>> for ARef<Device> { 444 - fn from(dev: &Device<device::Core>) -> Self { 445 - (&**dev).into() 446 - } 447 - } 425 + // SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic 426 + // argument. 427 + kernel::impl_device_context_deref!(unsafe { Device }); 428 + kernel::impl_device_context_into_aref!(Device); 448 429 449 430 // SAFETY: Instances of `Device` are always reference-counted. 
450 431 unsafe impl crate::types::AlwaysRefCounted for Device { ··· 444 455 } 445 456 } 446 457 447 - impl AsRef<device::Device> for Device { 448 - fn as_ref(&self) -> &device::Device { 458 + impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> { 459 + fn as_ref(&self) -> &device::Device<Ctx> { 449 460 // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid 450 461 // `struct pci_dev`. 451 462 let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+8 -24
rust/kernel/platform.rs
··· 10 10 of, 11 11 prelude::*, 12 12 str::CStr, 13 - types::{ARef, ForeignOwnable, Opaque}, 13 + types::{ForeignOwnable, Opaque}, 14 14 ThisModule, 15 15 }; 16 16 17 17 use core::{ 18 18 marker::PhantomData, 19 - ops::Deref, 20 19 ptr::{addr_of_mut, NonNull}, 21 20 }; 22 21 ··· 183 184 PhantomData<Ctx>, 184 185 ); 185 186 186 - impl Device { 187 + impl<Ctx: device::DeviceContext> Device<Ctx> { 187 188 fn as_raw(&self) -> *mut bindings::platform_device { 188 189 self.0.get() 189 190 } 190 191 } 191 192 192 - impl Deref for Device<device::Core> { 193 - type Target = Device; 194 - 195 - fn deref(&self) -> &Self::Target { 196 - let ptr: *const Self = self; 197 - 198 - // CAST: `Device<Ctx>` is a transparent wrapper of `Opaque<bindings::platform_device>`. 199 - let ptr = ptr.cast::<Device>(); 200 - 201 - // SAFETY: `ptr` was derived from `&self`. 202 - unsafe { &*ptr } 203 - } 204 - } 205 - 206 - impl From<&Device<device::Core>> for ARef<Device> { 207 - fn from(dev: &Device<device::Core>) -> Self { 208 - (&**dev).into() 209 - } 210 - } 193 + // SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic 194 + // argument. 195 + kernel::impl_device_context_deref!(unsafe { Device }); 196 + kernel::impl_device_context_into_aref!(Device); 211 197 212 198 // SAFETY: Instances of `Device` are always reference-counted. 213 199 unsafe impl crate::types::AlwaysRefCounted for Device { ··· 207 223 } 208 224 } 209 225 210 - impl AsRef<device::Device> for Device { 211 - fn as_ref(&self) -> &device::Device { 226 + impl<Ctx: device::DeviceContext> AsRef<device::Device<Ctx>> for Device<Ctx> { 227 + fn as_ref(&self) -> &device::Device<Ctx> { 212 228 // SAFETY: By the type invariant of `Self`, `self.as_raw()` is a pointer to a valid 213 229 // `struct platform_device`. 214 230 let dev = unsafe { addr_of_mut!((*self.as_raw()).dev) };
+12 -8
rust/macros/module.rs
··· 185 185 186 186 let info = ModuleInfo::parse(&mut it); 187 187 188 - let mut modinfo = ModInfoBuilder::new(info.name.as_ref()); 188 + // Rust does not allow hyphens in identifiers, use underscore instead. 189 + let ident = info.name.replace('-', "_"); 190 + let mut modinfo = ModInfoBuilder::new(ident.as_ref()); 189 191 if let Some(author) = info.author { 190 192 modinfo.emit("author", &author); 191 193 } ··· 312 310 #[doc(hidden)] 313 311 #[link_section = \"{initcall_section}\"] 314 312 #[used] 315 - pub static __{name}_initcall: extern \"C\" fn() -> kernel::ffi::c_int = __{name}_init; 313 + pub static __{ident}_initcall: extern \"C\" fn() -> 314 + kernel::ffi::c_int = __{ident}_init; 316 315 317 316 #[cfg(not(MODULE))] 318 317 #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)] 319 318 core::arch::global_asm!( 320 319 r#\".section \"{initcall_section}\", \"a\" 321 - __{name}_initcall: 322 - .long __{name}_init - . 320 + __{ident}_initcall: 321 + .long __{ident}_init - . 323 322 .previous 324 323 \"# 325 324 ); ··· 328 325 #[cfg(not(MODULE))] 329 326 #[doc(hidden)] 330 327 #[no_mangle] 331 - pub extern \"C\" fn __{name}_init() -> kernel::ffi::c_int {{ 328 + pub extern \"C\" fn __{ident}_init() -> kernel::ffi::c_int {{ 332 329 // SAFETY: This function is inaccessible to the outside due to the double 333 330 // module wrapping it. It is called exactly once by the C side via its 334 331 // placement above in the initcall section. ··· 338 335 #[cfg(not(MODULE))] 339 336 #[doc(hidden)] 340 337 #[no_mangle] 341 - pub extern \"C\" fn __{name}_exit() {{ 338 + pub extern \"C\" fn __{ident}_exit() {{ 342 339 // SAFETY: 343 340 // - This function is inaccessible to the outside due to the double 344 341 // module wrapping it. It is called exactly once by the C side via its 345 342 // unique name, 346 - // - furthermore it is only called after `__{name}_init` has returned `0` 347 - // (which delegates to `__init`). 
343 + // - furthermore it is only called after `__{ident}_init` has 344 + // returned `0` (which delegates to `__init`). 348 345 unsafe {{ __exit() }} 349 346 }} 350 347 ··· 384 381 ", 385 382 type_ = info.type_, 386 383 name = info.name, 384 + ident = ident, 387 385 modinfo = modinfo.buffer, 388 386 initcall_section = ".initcall6.init" 389 387 )