Merge tag 'amd-pstate-v6.15-2025-04-15' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/superm1/linux

Merge amd-pstate content for 6.15 (4/15/25) from Mario Limonciello:

"Add a fix for X3D processors where depending upon what BIOS was
 set initially, rankings might be set improperly.

 Add a fix for changing min/max limits while on the performance
 governor."

* tag 'amd-pstate-v6.15-2025-04-15' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/superm1/linux:
  cpufreq/amd-pstate: Enable ITMT support after initializing core rankings
  cpufreq/amd-pstate: Fix min_limit perf and freq updation for performance governor
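As a rough illustration of what the min/max limit fix changes, the sketch below is a standalone userspace model (the types, helper names, and frequency values are made up for illustration, not taken from the driver): under the performance governor the minimum limit is pinned to min(nominal frequency, max limit) instead of following policy->min, while other governors keep using policy->min.

#include <stdio.h>

enum policy_kind { POLICY_PERFORMANCE, POLICY_OTHER };

struct limits {
	unsigned int min_limit_freq;
	unsigned int max_limit_freq;
};

/* Simplified model of the fixed clamping rule; frequencies are in kHz. */
static struct limits update_min_max_limit(enum policy_kind policy,
					  unsigned int nominal_freq,
					  unsigned int policy_min,
					  unsigned int policy_max)
{
	struct limits l = { .max_limit_freq = policy_max };

	if (policy == POLICY_PERFORMANCE)
		/* Floor stays at nominal (or at the max limit, if that is lower). */
		l.min_limit_freq = nominal_freq < policy_max ? nominal_freq : policy_max;
	else
		l.min_limit_freq = policy_min;

	return l;
}

int main(void)
{
	/* Example numbers only: 400 MHz floor, 3.8 GHz nominal, 5.0 GHz boost. */
	struct limits perf = update_min_max_limit(POLICY_PERFORMANCE, 3800000, 400000, 5000000);
	struct limits other = update_min_max_limit(POLICY_OTHER, 3800000, 400000, 5000000);

	printf("performance governor: min=%u max=%u\n", perf.min_limit_freq, perf.max_limit_freq);
	printf("other governor:       min=%u max=%u\n", other.min_limit_freq, other.max_limit_freq);
	return 0;
}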
commit dead17b1a2
Author: Rafael J. Wysocki
Date:   2025-04-17 17:55:09 +02:00

@@ -607,13 +607,16 @@ static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
 	union perf_cached perf = READ_ONCE(cpudata->perf);
 
 	perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
-	perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
-
-	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
-		perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
 
 	WRITE_ONCE(cpudata->max_limit_freq, policy->max);
-	WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
+		WRITE_ONCE(cpudata->min_limit_freq, min(cpudata->nominal_freq, cpudata->max_limit_freq));
+	} else {
+		perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
+		WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+	}
 
 	WRITE_ONCE(cpudata->perf, perf);
 }
@@ -791,16 +794,6 @@ static void amd_perf_ctl_reset(unsigned int cpu)
 	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
 }
 
-/*
- * Set amd-pstate preferred core enable can't be done directly from cpufreq callbacks
- * due to locking, so queue the work for later.
- */
-static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
-{
-	sched_set_itmt_support();
-}
-static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
-
 #define CPPC_MAX_PERF	U8_MAX
 
 static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
@@ -811,14 +804,8 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
 
 	cpudata->hw_prefcore = true;
 
-	/*
-	 * The priorities can be set regardless of whether or not
-	 * sched_set_itmt_support(true) has been called and it is valid to
-	 * update them at any time after it has been called.
-	 */
+	/* Priorities must be initialized before ITMT support can be toggled on. */
 	sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
-
-	schedule_work(&sched_prefcore_work);
 }
 
 static void amd_pstate_update_limits(unsigned int cpu)
@@ -1193,6 +1180,9 @@ static ssize_t show_energy_performance_preference(
 
 static void amd_pstate_driver_cleanup(void)
 {
+	if (amd_pstate_prefcore)
+		sched_clear_itmt_support();
+
 	cppc_state = AMD_PSTATE_DISABLE;
 	current_pstate_driver = NULL;
 }
@@ -1235,6 +1225,10 @@ static int amd_pstate_register_driver(int mode)
 		return ret;
 	}
 
+	/* Enable ITMT support once all CPUs have initialized their asym priorities. */
+	if (amd_pstate_prefcore)
+		sched_set_itmt_support();
+
 	return 0;
 }
 
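The hunks after the first one implement the ITMT ordering change. A standalone model of the resulting pattern is sketched below (the helpers and rankings are invented stand-ins for sched_set_itmt_core_prio()/sched_set_itmt_support(), not the kernel API): every CPU publishes its ranking first, and scheduler support is enabled exactly once afterwards, so the scheduler never consumes a partially initialized priority table.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static int core_prio[NR_CPUS];
static bool itmt_enabled;

/* Illustrative stand-ins for the per-CPU priority and ITMT toggle calls. */
static void set_core_prio(int cpu, int prio) { core_prio[cpu] = prio; }
static void enable_itmt(void) { itmt_enabled = true; }

int main(void)
{
	/* Made-up rankings; on real hardware these come from the platform. */
	static const int ranking[NR_CPUS] = { 230, 231, 228, 229, 190, 191, 188, 189 };

	/* Step 1: publish every CPU's priority while ITMT is still off. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		set_core_prio(cpu, ranking[cpu]);

	/* Step 2: only now let the scheduler start using the rankings. */
	enable_itmt();

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d prio=%d (itmt %s)\n", cpu, core_prio[cpu],
		       itmt_enabled ? "on" : "off");
	return 0;
}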