Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
 * Copyright (c) 2017, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>

#define MSR_OC_MAILBOX			0x150
#define MSR_OC_MAILBOX_CMD_OFFSET	32
#define MSR_OC_MAILBOX_RSP_OFFSET	32
#define MSR_OC_MAILBOX_BUSY_BIT		63
#define OC_MAILBOX_FC_CONTROL_CMD	0x1C
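
/*
 * OC mailbox layout as used by this driver (inferred from the accessors
 * below): the command is written to bits 39:32, bit 63 is the busy flag,
 * and on completion bits 39:32 hold the response status while bits 7:0
 * carry the per-core maximum (favored core) ratio.
 */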

/*
 * Typical latency for a mailbox response is ~3 us, and it takes a further
 * ~3 us to process the mailbox read after issuing the mailbox write on a
 * Broadwell 3.4 GHz system. So the first mailbox read usually already has
 * the response, but retry twice to cover boundary cases.
 */
#define OC_MAILBOX_RETRY_COUNT	2
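
/*
 * Read the favored core ratio for @cpu through the OC mailbox. Returns the
 * ratio (used as the ITMT priority) on success, or a negative error code if
 * the mailbox access fails or the command is rejected.
 */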
static int get_oc_core_priority(unsigned int cpu)
{
        u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
        int ret, i;

        /* Issue favored core read command */
        value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
        /* Set the busy bit to indicate OS is trying to issue command */
        value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
        ret = wrmsrq_safe(MSR_OC_MAILBOX, value);
        if (ret) {
                pr_debug("cpu %d OC mailbox write failed\n", cpu);
                return ret;
        }

        for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
                ret = rdmsrq_safe(MSR_OC_MAILBOX, &value);
                if (ret) {
                        pr_debug("cpu %d OC mailbox read failed\n", cpu);
                        break;
                }

                if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
                        pr_debug("cpu %d OC mailbox still processing\n", cpu);
                        ret = -EBUSY;
                        continue;
                }

                if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
                        pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
                        ret = -ENXIO;
                        break;
                }

                ret = value & 0xff;
                pr_debug("cpu %d max_ratio %d\n", cpu, ret);
                break;
        }

        return ret;
}

/*
 * The work item is needed to avoid CPU hotplug locking issues:
 * itmt_legacy_cpu_online() runs from the CPU online callback, so it cannot
 * call sched_set_itmt_support() directly, as that function acquires the
 * hotplug lock in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
        sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);

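/*
 * CPU online callback: report the favored core ratio of each CPU to the
 * scheduler as its ITMT priority. ITMT support itself is only enabled (via
 * the work item above) once two onlined CPUs report different priorities,
 * so nothing changes on systems where all cores share the same ratio.
 */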
static int itmt_legacy_cpu_online(unsigned int cpu)
{
        static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
        int priority;

        priority = get_oc_core_priority(cpu);
        if (priority < 0)
                return 0;

        sched_set_itmt_core_prio(priority, cpu);

        /* Enable ITMT feature when a core with different priority is found */
        if (max_highest_perf <= min_highest_perf) {
                if (priority > max_highest_perf)
                        max_highest_perf = priority;

                if (priority < min_highest_perf)
                        min_highest_perf = priority;

                if (max_highest_perf > min_highest_perf)
                        schedule_work(&sched_itmt_work);
        }

        return 0;
}

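/* Platforms using this legacy (non HWP) enumeration path */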
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
        X86_MATCH_VFM(INTEL_BROADWELL_X, NULL),
        X86_MATCH_VFM(INTEL_SKYLAKE_X, NULL),
        {}
};

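/*
 * Register a dynamic CPU hotplug state so itmt_legacy_cpu_online() runs for
 * every CPU already online at init time and for any CPU onlined later.
 */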
static int __init itmt_legacy_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        id = x86_match_cpu(itmt_legacy_cpu_ids);
        if (!id)
                return -ENODEV;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/turbo_max_3:online",
                                itmt_legacy_cpu_online, NULL);
        if (ret < 0)
                return ret;

        return 0;
}
late_initcall(itmt_legacy_init)