linux-davinci

Commit a0b6ca69 authored Sep 15, 2009 by Stephen Rothwell

Merge commit 'cpufreq/next'

parents 85c004ca 1a8e42fa

Showing 6 changed files with 342 additions and 171 deletions (+342, -171)
Documentation/cpu-freq/user-guide.txt           +7   -2
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c      +21  -0
arch/x86/kernel/cpu/cpufreq/powernow-k8.c       +4   -25
drivers/cpufreq/cpufreq.c                       +187 -118
drivers/cpufreq/cpufreq_ondemand.c              +113 -26
include/linux/cpufreq.h                         +10  -0
Documentation/cpu-freq/user-guide.txt

@@ -176,7 +176,9 @@ scaling_governor, and by "echoing" the name of another
 				work on some specific architectures or
 				processors.

-cpuinfo_cur_freq :		Current speed of the CPU, in KHz.
+cpuinfo_cur_freq :		Current frequency of the CPU as obtained from
+				the hardware, in KHz. This is the frequency
+				the CPU actually runs at.

 scaling_available_frequencies : List of available frequencies, in KHz.

@@ -196,7 +198,10 @@ related_cpus : List of CPUs that need some sort of frequency
 scaling_driver :		Hardware driver for cpufreq.

-scaling_cur_freq :		Current frequency of the CPU, in KHz.
+scaling_cur_freq :		Current frequency of the CPU as determined by
+				the governor and cpufreq core, in KHz. This is
+				the frequency the kernel thinks the CPU runs
+				at.

 If you have selected the "userspace" governor which allows you to
 set the CPU operating frequency to a specific value, you can read out
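As a side note on the two attributes distinguished above, here is a minimal userspace sketch (not part of this commit) that prints both values for cpu0; the cpu0 path and buffer size are illustrative assumptions.

	/* Read the hardware-reported and kernel-reported frequency of cpu0. */
	#include <stdio.h>

	static void print_freq(const char *path)
	{
		char buf[64];
		FILE *f = fopen(path, "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);	/* value is in kHz */
		if (f)
			fclose(f);
	}

	int main(void)
	{
		/* frequency the hardware says it runs at */
		print_freq("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq");
		/* frequency the governor/cpufreq core thinks the CPU runs at */
		print_freq("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq");
		return 0;
	}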
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c

@@ -588,6 +588,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled*/
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+		}
+	return 0;
+}
 #endif

 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)

@@ -602,6 +617,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	dprintk("acpi_cpufreq_cpu_init\n");

+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
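For readers unfamiliar with the check added above, the following standalone sketch (not kernel code) shows the same vendor/family/model/stepping matching pattern against a small table. The table contents are hypothetical; the real function checks only the single erratum documented in the referenced specification update.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct cpu_id {
		int vendor;	/* 0 stands in for X86_VENDOR_INTEL here */
		int family;
		int model;
		int stepping;
	};

	/* hypothetical blacklist: family 15, model 6, stepping 8 */
	static const struct cpu_id blacklist[] = {
		{ .vendor = 0, .family = 15, .model = 6, .stepping = 8 },
	};

	static bool is_blacklisted(const struct cpu_id *c)
	{
		for (size_t i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++) {
			if (c->vendor == blacklist[i].vendor &&
			    c->family == blacklist[i].family &&
			    c->model == blacklist[i].model &&
			    c->stepping == blacklist[i].stepping)
				return true;	/* refuse to load, like -ENODEV above */
		}
		return false;
	}

	int main(void)
	{
		struct cpu_id c = { 0, 15, 6, 8 };
		printf("blacklisted: %d\n", is_blacklisted(&c));
		return 0;
	}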
arch/x86/kernel/cpu/cpufreq/powernow-k8.c

@@ -854,6 +854,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}

+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
+
 	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else

@@ -866,11 +870,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;

-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);

 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);

@@ -941,7 +942,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
 	int i;
-	int cntlofreq = 0;

 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;

@@ -982,27 +982,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 			continue;
 		}

-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
-
 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries "
 				"%u kHz vs. %u kHz\n", freq,
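The removed block above relied on invalidate_entry() and on the cpufreq convention of marking unusable table slots rather than compacting the table. A self-contained sketch of that mark-and-skip idea (not kernel code; ENTRY_INVALID and the table values are made-up stand-ins for CPUFREQ_ENTRY_INVALID and a real frequency table):

	#include <stdio.h>

	#define ENTRY_INVALID (~0U)	/* analogous to CPUFREQ_ENTRY_INVALID */

	struct freq_entry {
		unsigned int index;
		unsigned int frequency;	/* kHz */
	};

	static void invalidate_entry(struct freq_entry *table, unsigned int i)
	{
		table[i].frequency = ENTRY_INVALID;
	}

	int main(void)
	{
		struct freq_entry table[] = {
			{ 0, 2600000 }, { 1, 1800000 }, { 2, 1800000 }, { 3, 800000 },
		};

		invalidate_entry(table, 2);		/* drop the duplicate entry */

		for (unsigned int i = 0; i < 4; i++) {
			if (table[i].frequency == ENTRY_INVALID)
				continue;		/* later walks simply skip it */
			printf("%u: %u kHz\n", table[i].index, table[i].frequency);
		}
		return 0;
	}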
drivers/cpufreq/cpufreq.c

@@ -61,6 +61,8 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  *   are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
+ * - Lock should not be held across
+ *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
 static DEFINE_PER_CPU(int, policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

@@ -686,6 +688,9 @@ static struct attribute *default_attrs[] = {
 	NULL
 };

+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
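The to_policy()/to_attr() helpers shown as context above are thin wrappers around container_of(). A standalone sketch of that pattern, using simplified stand-in types rather than the real struct cpufreq_policy:

	#include <stddef.h>
	#include <stdio.h>

	/* recover the enclosing structure from a pointer to an embedded member */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kobj { const char *name; };

	struct policy {
		unsigned int cpu;
		struct kobj kobj;	/* embedded, as in struct cpufreq_policy */
	};

	#define to_policy(k) container_of(k, struct policy, kobj)

	int main(void)
	{
		struct policy p = { .cpu = 3, .kobj = { .name = "cpufreq" } };
		struct kobj *k = &p.kobj;	/* what sysfs hands back */

		printf("cpu = %u\n", to_policy(k)->cpu);
		return 0;
	}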
@@ -756,92 +761,20 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };

-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
+/*
+ * Returns:
+ *   Negative: Failure
+ *   0:        Success
+ *   Positive: When we have a managed CPU and the sysfs got symlinked
  */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
 {
-	unsigned int cpu = sys_dev->id;
 	int ret = 0;
-	struct cpufreq_policy new_policy;
-	struct cpufreq_policy *policy;
-	struct freq_attr **drv_attr;
-	struct sys_device *cpu_sys_dev;
+#ifdef CONFIG_SMP
 	unsigned long flags;
 	unsigned int j;

-	if (cpu_is_offline(cpu))
-		return 0;
-
-	cpufreq_debug_disable_ratelimit();
-	dprintk("adding CPU %u\n", cpu);
-
-#ifdef CONFIG_SMP
-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get(cpu);
-	if (unlikely(policy)) {
-		cpufreq_cpu_put(policy);
-		cpufreq_debug_enable_ratelimit();
-		return 0;
-	}
-#endif
-
-	if (!try_module_get(cpufreq_driver->owner)) {
-		ret = -EINVAL;
-		goto module_out;
-	}
-
-	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
-	if (!policy) {
-		ret = -ENOMEM;
-		goto nomem_out;
-	}
-	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_policy;
-	}
-	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_cpumask;
-	}
-
-	policy->cpu = cpu;
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
-
-	/* Initially set CPU itself as the policy_cpu */
-	per_cpu(policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
-
-	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update);
-
-	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		dprintk("initialization failed\n");
-		goto err_unlock_policy;
-	}
-	policy->user_policy.min = policy->min;
-	policy->user_policy.max = policy->max;
-
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-				     CPUFREQ_START, policy);
-
-#ifdef CONFIG_SMP
 #ifdef CONFIG_HOTPLUG_CPU
 	if (per_cpu(cpufreq_cpu_governor, cpu)) {
 		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
@@ -872,9 +805,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 				/* Should not go through policy unlock path */
 				if (cpufreq_driver->exit)
 					cpufreq_driver->exit(policy);
-				ret = -EBUSY;
 				cpufreq_cpu_put(managed_policy);
-				goto err_free_cpumask;
+				return -EBUSY;
 			}

 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -893,17 +825,62 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			 * Call driver->exit() because only the cpu parent of
 			 * the kobj needed to call init().
 			 */
-			goto out_driver_exit; /* call driver->exit() */
+			if (cpufreq_driver->exit)
+				cpufreq_driver->exit(policy);
+
+			if (!ret)
+				return 1;
+			else
+				return ret;
 		}
 	}
 #endif
-	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	return ret;
+}
+
+/* symlink affected CPUs */
+int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	int ret = 0;
+
+	for_each_cpu(j, policy->cpus) {
+		struct cpufreq_policy *managed_policy;
+		struct sys_device *cpu_sys_dev;
+
+		if (j == cpu)
+			continue;
+		if (!cpu_online(j))
+			continue;
+
+		dprintk("CPU %u already managed, adding link\n", j);
+		managed_policy = cpufreq_cpu_get(cpu);
+		cpu_sys_dev = get_cpu_sysdev(j);
+		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+					"cpufreq");
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
+			      struct sys_device *sys_dev)
+{
+	struct cpufreq_policy new_policy;
+	struct freq_attr **drv_attr;
+	unsigned long flags;
+	int ret = 0;
+	unsigned int j;

 	/* prepare interface data */
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
-				   "cpufreq");
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+				   &sys_dev->kobj, "cpufreq");
 	if (ret)
-		goto out_driver_exit;
+		return ret;

 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
@@ -926,35 +903,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
 		if (!cpu_online(j))
 			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

-	/* symlink affected CPUs */
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-
-		if (j == cpu)
-			continue;
-		if (!cpu_online(j))
-			continue;
-
-		dprintk("CPU %u already managed, adding link\n", j);
-		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
-					"cpufreq");
-		if (ret) {
-			cpufreq_cpu_put(managed_policy);
-			goto err_out_unregister;
-		}
-	}
+	ret = cpufreq_add_dev_symlink(cpu, policy);
+	if (ret)
+		goto err_out_kobj_put;

-	policy->governor = NULL; /* to assure that the starting sequence is
-				  * run in cpufreq_set_policy */
 	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	policy->governor = NULL;

 	/* set default policy */
 	ret = __cpufreq_set_policy(policy, &new_policy);
@@ -963,8 +925,107 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	if (ret) {
 		dprintk("setting policy failed\n");
-		goto err_out_unregister;
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(policy);
 	}
+	return ret;
+
+err_out_kobj_put:
+	kobject_put(&policy->kobj);
+	wait_for_completion(&policy->kobj_unregister);
+	return ret;
+}
+
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct sys_device *sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int ret = 0;
+	struct cpufreq_policy *policy;
+	unsigned long flags;
+	unsigned int j;
+
+	if (cpu_is_offline(cpu))
+		return 0;
+
+	cpufreq_debug_disable_ratelimit();
+	dprintk("adding CPU %u\n", cpu);
+
+#ifdef CONFIG_SMP
+	/* check whether a different CPU already registered this
+	 * CPU because it is in the same boat. */
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(policy)) {
+		cpufreq_cpu_put(policy);
+		cpufreq_debug_enable_ratelimit();
+		return 0;
+	}
+#endif
+
+	if (!try_module_get(cpufreq_driver->owner)) {
+		ret = -EINVAL;
+		goto module_out;
+	}
+
+	ret = -ENOMEM;
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	if (!policy)
+		goto nomem_out;
+
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+		goto err_free_policy;
+
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+		goto err_free_cpumask;
+
+	policy->cpu = cpu;
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);
+
+	init_completion(&policy->kobj_unregister);
+	INIT_WORK(&policy->update, handle_update);
+
+	/* Set governor before ->init, so that driver could check it */
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* call driver. From then on the cpufreq must be able
+	 * to accept all calls to ->verify and ->setpolicy for this CPU
+	 */
+	ret = cpufreq_driver->init(policy);
+	if (ret) {
+		dprintk("initialization failed\n");
+		goto err_unlock_policy;
+	}
+	policy->user_policy.min = policy->min;
+	policy->user_policy.max = policy->max;
+
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				     CPUFREQ_START, policy);
+
+	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	if (ret) {
+		if (ret > 0)
+			/* This is a managed cpu, symlink created,
+			   exit with 0 */
+			ret = 0;
+		goto err_unlock_policy;
+	}
+
+	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	if (ret)
+		goto err_out_unregister;

 	unlock_policy_rwsem_write(cpu);
@@ -982,14 +1043,9 @@ err_out_unregister:
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

-err_out_kobj_put:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);

-out_driver_exit:
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
-
 err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
 err_free_cpumask:
@@ -1653,8 +1709,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			dprintk("governor switch\n");

 			/* end old governor */
-			if (data->governor)
+			if (data->governor) {
+				/*
+				 * Need to release the rwsem around governor
+				 * stop due to lock dependency between
+				 * cancel_delayed_work_sync and the read lock
+				 * taken in the delayed work handler.
+				 */
+				unlock_policy_rwsem_write(data->cpu);
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				lock_policy_rwsem_write(data->cpu);
+			}

 			/* start new governor */
 			data->governor = policy->governor;
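The comment added above describes a lock-ordering problem: the governor's delayed work handler takes the policy rwsem for reading, while stopping the governor waits for that work to finish. Below is a userspace sketch of the same shape using pthreads instead of kernel primitives; worker_fn, stop_worker and policy_rwsem are illustrative names, not part of the patch.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_rwlock_t policy_rwsem = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_t worker;
	static atomic_bool worker_should_run = true;

	/* plays the role of the delayed work handler: takes the read lock */
	static void *worker_fn(void *arg)
	{
		while (atomic_load(&worker_should_run)) {
			pthread_rwlock_rdlock(&policy_rwsem);
			/* ... sample load, maybe change frequency ... */
			pthread_rwlock_unlock(&policy_rwsem);
		}
		return NULL;
	}

	/* plays the role of cancel_delayed_work_sync(): waits for the worker */
	static void stop_worker(void)
	{
		atomic_store(&worker_should_run, false);
		pthread_join(worker, NULL);
	}

	int main(void)
	{
		pthread_create(&worker, NULL, worker_fn, NULL);

		pthread_rwlock_wrlock(&policy_rwsem);	/* like lock_policy_rwsem_write() */
		/*
		 * Calling stop_worker() while still holding the write lock can
		 * deadlock: the worker may be blocked in rdlock() waiting for us,
		 * while we wait for it to exit.  So drop the lock around the stop,
		 * as the hunk above does around CPUFREQ_GOV_STOP, then retake it.
		 */
		pthread_rwlock_unlock(&policy_rwsem);
		stop_worker();
		pthread_rwlock_wrlock(&policy_rwsem);

		/* ... switch to the new governor ... */
		pthread_rwlock_unlock(&policy_rwsem);
		puts("governor switched without deadlock");
		return 0;
	}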
@@ -1884,7 +1949,11 @@ static int __init cpufreq_core_init(void)
 		per_cpu(policy_cpu, cpu) = -1;
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}

+	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
+						&cpu_sysdev_class.kset.kobj);
+	BUG_ON(!cpufreq_global_kobject);
+
 	return 0;
 }
 core_initcall(cpufreq_core_init);
drivers/cpufreq/cpufreq_ondemand.c

@@ -55,6 +55,18 @@ static unsigned int min_sampling_rate;
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

 static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+	.name			= "ondemand",
+	.governor		= cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};

 /* Sampling types */
 enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
@@ -206,20 +218,23 @@ static void ondemand_powersave_bias_init(void)
 }

 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
 		    "sysfs file is deprecated - used by: %s\n", current->comm);
 	return sprintf(buf, "%u\n", -1U);
 }

-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }

 #define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)

 define_one_ro(sampling_rate_max);
@@ -228,7 +243,7 @@ define_one_ro(sampling_rate_min);
 /* cpufreq_ondemand Governor Tunables */
 #define show_one(file_name, object)					\
 static ssize_t show_##file_name						\
-(struct cpufreq_policy *unused, char *buf)				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
 {									\
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
 }
@@ -237,8 +252,38 @@ show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);

-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+/*** delete after deprecation time ***/
+
+#define DEPRECATION_MSG(file_name)					\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");
+
+#define show_one_old(file_name)						\
+static ssize_t show_##file_name##_old					\
+(struct cpufreq_policy *unused, char *buf)				\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return show_##file_name(NULL, NULL, buf);			\
+}
+show_one_old(sampling_rate);
+show_one_old(up_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(powersave_bias);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+
+#define define_one_ro_old(object, _name)				\
+static struct freq_attr object =					\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+
+/*** delete after deprecation time ***/
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+				   const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
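For reference, expanding show_one_old(sampling_rate) from the macro added above yields roughly the following wrapper; it keeps the old per-policy prototype and forwards to the new kobject-based show routine, ignoring the unused arguments:

	static ssize_t show_sampling_rate_old
	(struct cpufreq_policy *unused, char *buf)
	{
		printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "
			    "interface is deprecated - " "sampling_rate" "\n");
		return show_sampling_rate(NULL, NULL, buf);
	}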
@@ -253,8 +298,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }

-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -272,8 +317,8 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }

-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+				      const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -309,8 +354,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	return count;
 }

-static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
+				    const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -331,7 +376,7 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
 }

 #define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)

 define_one_rw(sampling_rate);
@@ -354,6 +399,47 @@ static struct attribute_group dbs_attr_group = {
 	.name = "ondemand",
 };

+/*** delete after deprecation time ***/
+
+#define write_one_old(file_name)					\
+static ssize_t store_##file_name##_old					\
+(struct cpufreq_policy *unused, const char *buf, size_t count)		\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return store_##file_name(NULL, NULL, buf, count);		\
+}
+write_one_old(sampling_rate);
+write_one_old(up_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(powersave_bias);
+
+#define define_one_rw_old(object, _name)				\
+static struct freq_attr object =					\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(powersave_bias_old, powersave_bias);
+
+static struct attribute *dbs_attributes_old[] = {
+	&sampling_rate_max_old.attr,
+	&sampling_rate_min_old.attr,
+	&sampling_rate_old.attr,
+	&up_threshold_old.attr,
+	&ignore_nice_load_old.attr,
+	&powersave_bias_old.attr,
+	NULL
+};
+
+static struct attribute_group dbs_attr_group_old = {
+	.attrs = dbs_attributes_old,
+	.name = "ondemand",
+};
+
+/*** delete after deprecation time ***/
+
 /************************** sysfs end ************************/

 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -544,7 +630,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		mutex_lock(&dbs_mutex);

-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
 		if (rc) {
 			mutex_unlock(&dbs_mutex);
 			return rc;
@@ -565,13 +651,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		this_dbs_info->cpu = cpu;
 		ondemand_powersave_bias_init_cpu(cpu);
-		mutex_init(&this_dbs_info->timer_mutex);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
 		 */
 		if (dbs_enable == 1) {
 			unsigned int latency;
+
+			rc = sysfs_create_group(cpufreq_global_kobject,
+						&dbs_attr_group);
+			if (rc) {
+				mutex_unlock(&dbs_mutex);
+				return rc;
+			}
+
 			/* policy latency is in nS. Convert it to uS first */
 			latency = policy->cpuinfo.transition_latency / 1000;
 			if (latency == 0)
@@ -585,6 +678,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		mutex_unlock(&dbs_mutex);

+		mutex_init(&this_dbs_info->timer_mutex);
 		dbs_timer_init(this_dbs_info);
 		break;
@@ -592,10 +686,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);

 		mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
 		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);

+		if (!dbs_enable)
+			sysfs_remove_group(cpufreq_global_kobject,
+					   &dbs_attr_group);
+
 		break;
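Together with the creation of the global group on first enable shown earlier, the removal above implements a "first user creates, last user removes" pattern (serialized by dbs_mutex in the real code). A minimal standalone sketch of that idea, with create_group()/remove_group() standing in for sysfs_create_group()/sysfs_remove_group() on cpufreq_global_kobject:

	#include <stdio.h>

	static int users;	/* plays the role of dbs_enable */

	static int create_group(void)  { puts("create global group"); return 0; }
	static void remove_group(void) { puts("remove global group"); }

	static int governor_start(void)
	{
		users++;
		if (users == 1)		/* only the first user creates the group */
			return create_group();
		return 0;
	}

	static void governor_stop(void)
	{
		users--;
		if (!users)		/* only the last user tears it down */
			remove_group();
	}

	int main(void)
	{
		governor_start();	/* cpu0: creates */
		governor_start();	/* cpu1: no-op   */
		governor_stop();	/* cpu1: no-op   */
		governor_stop();	/* cpu0: removes */
		return 0;
	}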
@@ -613,16 +710,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }

-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-	.name			= "ondemand",
-	.governor		= cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	int err;
include/linux/cpufreq.h

@@ -65,6 +65,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
 struct cpufreq_governor;

+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+
 #define CPUFREQ_ETERNAL			(-1)
 struct cpufreq_cpuinfo {
 	unsigned int		max_freq;
@@ -274,6 +277,13 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };

+struct global_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+	ssize_t (*store)(struct kobject *a, struct attribute *b,
+			 const char *c, size_t count);
+};
+
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                     *
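A hedged sketch of how a driver might use the new struct global_attr directly, without the ondemand macros: one hypothetical read-only tunable ("example_tunable" is made up) exposed under cpufreq_global_kobject, with error handling trimmed.

	#include <linux/cpufreq.h>
	#include <linux/kobject.h>
	#include <linux/module.h>
	#include <linux/sysfs.h>

	static unsigned int example_tunable = 42;

	static ssize_t show_example_tunable(struct kobject *kobj,
					    struct attribute *attr, char *buf)
	{
		return sprintf(buf, "%u\n", example_tunable);
	}

	/* __ATTR() fills .attr/.show/.store, matching struct global_attr's layout */
	static struct global_attr example_tunable_attr =
		__ATTR(example_tunable, 0444, show_example_tunable, NULL);

	static struct attribute *example_attrs[] = {
		&example_tunable_attr.attr,
		NULL
	};

	static struct attribute_group example_group = {
		.attrs = example_attrs,	/* no .name: file lands directly in .../cpu/cpufreq */
	};

	static int __init example_init(void)
	{
		/* cpufreq_global_kobject is created in cpufreq_core_init() above */
		return sysfs_create_group(cpufreq_global_kobject, &example_group);
	}

	static void __exit example_exit(void)
	{
		sysfs_remove_group(cpufreq_global_kobject, &example_group);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");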