Commit bb758e96 authored by Linus Torvalds

Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  hrtimers: fix warning in kernel/hrtimer.c
  x86: make sure we really have an hpet mapping before using it
  x86: enable HPET on Fujitsu u9200
  linux/timex.h: cleanup for userspace
  posix-timers: simplify de_thread()->exit_itimers() path
  posix-timers: check ->it_signal instead of ->it_pid to validate the timer
  posix-timers: use "struct pid*" instead of "struct task_struct*"
  nohz: suppress needless timer reprogramming
  clocksource, acpi_pm.c: put acpi_pm_read_slow() under CONFIG_PCI
  nohz: no softirq pending warnings for offline cpus
  hrtimer: removing all ur callback modes, fix
  hrtimer: removing all ur callback modes, fix hotplug
  hrtimer: removing all ur callback modes
  x86: correct link to HPET timer specification
  rtc-cmos: export second NVRAM bank

Fixed up conflicts in sound/drivers/pcsp/pcsp.c and sound/core/hrtimer.c
manually.
parents 5f34fe1c 32e8d186
arch/x86/Kconfig
@@ -479,7 +479,7 @@ config HPET_TIMER
 	  The HPET provides a stable time base on SMP
 	  systems, unlike the TSC, but it is more expensive to access,
 	  as it is off-chip. You can find the HPET spec at
-	  <http://www.intel.com/hardwaredesign/hpetspec.htm>.
+	  <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 	  You can safely choose Y here. However, HPET will only be
 	  activated if the platform and the BIOS support this feature.
arch/x86/kernel/hpet.c
@@ -813,7 +813,7 @@ int __init hpet_enable(void)
 out_nohpet:
 	hpet_clear_mapping();
-	boot_hpet_disable = 1;
+	hpet_address = 0;
 	return 0;
 }
@@ -836,9 +836,10 @@ static __init int hpet_late_init(void)
 		hpet_address = force_hpet_address;
 		hpet_enable();
 	}
+
+	if (!hpet_virt_address)
+		return -ENODEV;
+
 	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
arch/x86/kernel/quirks.c
@@ -168,6 +168,8 @@
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
 			 ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
 			 ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
+			 ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
 			 ich_force_enable_hpet);
drivers/char/hpet.c
@@ -46,7 +46,7 @@
 /*
  * The High Precision Event Timer driver.
  * This driver is closely modelled after the rtc.c driver.
- * http://www.intel.com/hardwaredesign/hpetspec.htm
+ * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
  */
 #define	HPET_USER_FREQ	(64)
 #define	HPET_DRIFT	(500)
drivers/clocksource/acpi_pm.c
@@ -57,11 +57,6 @@ u32 acpi_pm_read_verified(void)
 	return v2;
 }
 
-static cycle_t acpi_pm_read_slow(void)
-{
-	return (cycle_t)acpi_pm_read_verified();
-}
-
 static cycle_t acpi_pm_read(void)
 {
 	return (cycle_t)read_pmtmr();
@@ -88,6 +83,11 @@ static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);
 
+static cycle_t acpi_pm_read_slow(void)
+{
+	return (cycle_t)acpi_pm_read_verified();
+}
+
 static inline void acpi_pm_need_workaround(void)
 {
 	clocksource_acpi_pm.read = acpi_pm_read_slow;
drivers/input/touchscreen/ads7846.c
@@ -697,7 +697,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
 	struct ads7846	*ts = container_of(handle, struct ads7846, timer);
 	int		status = 0;
 
-	spin_lock_irq(&ts->lock);
+	spin_lock(&ts->lock);
 
 	if (unlikely(!get_pendown_state(ts) ||
 		     device_suspended(&ts->spi->dev))) {
@@ -728,7 +728,7 @@ static enum hrtimer_restart ads7846_timer(struct hrtimer *handle)
 			dev_err(&ts->spi->dev, "spi_async --> %d\n", status);
 	}
 
-	spin_unlock_irq(&ts->lock);
+	spin_unlock(&ts->lock);
 
 	return HRTIMER_NORESTART;
 }
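The ads7846 hunks above are fallout from the hrtimer callback-mode removal later in this merge: with the modes gone, every hrtimer callback runs in hardirq context with interrupts already disabled, so the _irq lock variants become redundant. A minimal sketch of the resulting pattern — the demo_* names are hypothetical, not part of the driver:

	#include <linux/hrtimer.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
	{
		/* IRQs are already off in an hrtimer callback, so a plain
		 * spin_lock() is enough; spin_unlock_irq() would even be
		 * wrong here, as it unconditionally re-enables IRQs. */
		spin_lock(&demo_lock);
		/* ... update state shared with process context ... */
		spin_unlock(&demo_lock);

		return HRTIMER_NORESTART;
	}

Process-context code sharing demo_lock with such a callback still has to use spin_lock_irqsave().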
fs/exec.c
@@ -773,7 +773,6 @@ static int de_thread(struct task_struct *tsk)
 	struct signal_struct *sig = tsk->signal;
 	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
-	struct task_struct *leader = NULL;
 	int count;
 
 	if (thread_group_empty(tsk))
@@ -811,7 +810,7 @@ static int de_thread(struct task_struct *tsk)
 	 * and to assume its PID:
 	 */
 	if (!thread_group_leader(tsk)) {
-		leader = tsk->group_leader;
+		struct task_struct *leader = tsk->group_leader;
 
 		sig->notify_count = -1;	/* for exit_notify() */
 		for (;;) {
@@ -863,8 +862,9 @@ static int de_thread(struct task_struct *tsk)
 		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
 		leader->exit_state = EXIT_DEAD;
 		write_unlock_irq(&tasklist_lock);
+
+		release_task(leader);
 	}
 
 	sig->group_exit_task = NULL;
@@ -873,8 +873,6 @@ static int de_thread(struct task_struct *tsk)
 no_thread_group:
 	exit_itimers(sig);
 	flush_itimer_signals();
-	if (leader)
-		release_task(leader);
 
 	if (atomic_read(&oldsighand->count) != 1) {
 		struct sighand_struct *newsighand;
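de_thread() runs when a multithreaded process calls exec: the surviving thread kills its siblings and takes over the group leader's PID, and the rework above lets it release the old leader inside the leader-replacement branch instead of carrying the pointer into the shared no_thread_group teardown. A hedged userspace demo that exercises this path — not kernel code; compile with -pthread:

	#include <pthread.h>
	#include <unistd.h>

	static void *thr(void *arg)
	{
		/* exec from a non-leader thread forces de_thread() in the kernel */
		execl("/bin/true", "true", (char *)NULL);
		return NULL;		/* reached only if the exec failed */
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, thr, NULL);
		pthread_join(t, NULL);	/* never returns if the exec succeeds */
		return 0;
	}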
include/linux/hrtimer.h
@@ -42,26 +42,6 @@ enum hrtimer_restart {
 	HRTIMER_RESTART,	/* Timer must be restarted */
 };
 
-/*
- * hrtimer callback modes:
- *
- *	HRTIMER_CB_SOFTIRQ:		Callback must run in softirq context
- *	HRTIMER_CB_IRQSAFE_PERCPU:	Callback must run in hardirq context
- *					Special mode for tick emulation and
- *					scheduler timer. Such timers are per
- *					cpu and not allowed to be migrated on
- *					cpu unplug.
- *	HRTIMER_CB_IRQSAFE_UNLOCKED:	Callback should run in hardirq context
- *					with timer->base lock unlocked
- *					used for timers which call wakeup to
- *					avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
-	HRTIMER_CB_SOFTIRQ,
-	HRTIMER_CB_IRQSAFE_PERCPU,
-	HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
 /*
  * Values to track state of the timer
  *
@@ -70,7 +50,6 @@ enum hrtimer_cb_mode {
  * 0x00		inactive
  * 0x01		enqueued into rbtree
  * 0x02		callback function running
- * 0x04		callback pending (high resolution mode)
  *
  * Special cases:
  * 0x03		callback function running and enqueued
@@ -92,8 +71,7 @@ enum hrtimer_cb_mode {
 #define HRTIMER_STATE_INACTIVE	0x00
 #define HRTIMER_STATE_ENQUEUED	0x01
 #define HRTIMER_STATE_CALLBACK	0x02
-#define HRTIMER_STATE_PENDING	0x04
-#define HRTIMER_STATE_MIGRATE	0x08
+#define HRTIMER_STATE_MIGRATE	0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -109,8 +87,6 @@ enum hrtimer_cb_mode {
  * @function:	timer expiry callback function
  * @base:	pointer to the timer base (per cpu and per clock)
  * @state:	state information (See bit values above)
- * @cb_mode:	high resolution timer feature to select the callback execution
- *		mode
 * @cb_entry:	list head to enqueue an expired timer into the callback list
 * @start_site:	timer statistics field to store the site where the timer
 *		was started
@@ -129,7 +105,6 @@ struct hrtimer {
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
 	struct list_head		cb_entry;
-	enum hrtimer_cb_mode		cb_mode;
 #ifdef CONFIG_TIMER_STATS
 	int				start_pid;
 	void				*start_site;
@@ -188,15 +163,11 @@ struct hrtimer_clock_base {
 * @check_clocks:	Indicator, when set evaluate time source and clock
 *			event devices whether high resolution mode can be
 *			activated.
- * @cb_pending:		Expired timers are moved from the rbtree to this
- *			list in the timer interrupt. The list is processed
- *			in the softirq.
 * @nr_events:		Total number of timer interrupt events
 */
 struct hrtimer_cpu_base {
 	spinlock_t			lock;
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
-	struct list_head		cb_pending;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	ktime_t				expires_next;
 	int				hres_active;
@@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
 */
 static inline int hrtimer_is_queued(struct hrtimer *timer)
 {
-	return timer->state &
-		(HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+	return timer->state & HRTIMER_STATE_ENQUEUED;
 }
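With enum hrtimer_cb_mode gone, timer setup loses a step: there is no mode to select, and every callback simply runs in hardirq context. A minimal post-series sketch (demo_* names are hypothetical):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_fn(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;	/* one-shot */
	}

	static void demo_setup(void)
	{
		hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		demo_timer.function = demo_fn;
		/* note: no cb_mode assignment any more */
		hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
	}

This is exactly the pattern to which kernel/sched.c, trace_sysprof, and the sound drivers are converted below.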
include/linux/interrupt.h
@@ -251,9 +251,6 @@ enum
 	BLOCK_SOFTIRQ,
 	TASKLET_SOFTIRQ,
 	SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
-	HRTIMER_SOFTIRQ,
-#endif
 	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
 
 	NR_SOFTIRQS
include/linux/posix-timers.h
@@ -45,7 +45,11 @@ struct k_itimer {
 	int it_requeue_pending;		/* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
 	int it_sigev_notify;		/* notify word of sigevent struct */
-	struct task_struct *it_process;	/* process to send signal to */
+	struct signal_struct *it_signal;
+	union {
+		struct pid *it_pid;	/* pid of process to send signal to */
+		struct task_struct *it_process;	/* for clock_nanosleep */
+	};
 	struct sigqueue *sigq;		/* signal queue entry. */
 	union {
 		struct {
include/linux/timex.h
@@ -53,46 +53,10 @@
 #ifndef _LINUX_TIMEX_H
 #define _LINUX_TIMEX_H
 
-#include <linux/compiler.h>
 #include <linux/time.h>
 
-#include <asm/param.h>
-
 #define NTP_API		4	/* NTP API version */
 
-/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
-#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
-#define MAXTC		10	/* maximum time constant (shift) */
-
-/*
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- */
-#define SHIFT_USEC	16	/* frequency offset scale (shift) */
-#define PPM_SCALE	(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT	19
-#define PPM_SCALE_INV	((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
-			 PPM_SCALE + 1)
-
-#define MAXPHASE	500000000l	/* max phase error (ns) */
-#define MAXFREQ	500000		/* max frequency error (ns/s) */
-#define MAXFREQ_SCALED	((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC	256		/* min interval between updates (s) */
-#define MAXSEC	2048		/* max interval between updates (s) */
-#define NTP_PHASE_LIMIT	((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-
 /*
  * syscall interface - used (mainly by NTP daemon)
  * to discipline kernel clock oscillator
@@ -199,8 +163,45 @@ struct timex {
 #define TIME_BAD	TIME_ERROR /* bw compat */
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
 #include <asm/timex.h>
 
+/*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic. SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL. With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_PLL	4	/* PLL frequency factor (shift) */
+#define SHIFT_FLL	2	/* FLL frequency factor (shift) */
+#define MAXTC		10	/* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC	16	/* frequency offset scale (shift) */
+#define PPM_SCALE	(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT	19
+#define PPM_SCALE_INV	((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+			 PPM_SCALE + 1)
+
+#define MAXPHASE	500000000l	/* max phase error (ns) */
+#define MAXFREQ	500000		/* max frequency error (ns/s) */
+#define MAXFREQ_SCALED	((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC	256		/* min interval between updates (s) */
+#define MAXSEC	2048		/* max interval between updates (s) */
+#define NTP_PHASE_LIMIT	((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
 /*
  * kernel variables
  * Note: maximum error = NTP synch distance = dispersion + delay / 2;
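What remains visible to userspace in <linux/timex.h> is the adjtimex(2) ABI; the PLL/FLL tuning constants moved under #ifdef __KERNEL__ are consumed only by the kernel's NTP code. A small self-contained sketch of the userspace side this cleanup serves:

	#include <stdio.h>
	#include <sys/timex.h>

	int main(void)
	{
		struct timex tx = { .modes = 0 };	/* modes = 0: read-only query */
		int state = adjtimex(&tx);

		if (state == -1) {
			perror("adjtimex");
			return 1;
		}
		printf("freq offset: %ld, clock state: %d\n", tx.freq, state);
		return 0;
	}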
kernel/hrtimer.c
(diff collapsed in this view)
kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
 *	    must supply functions here, even if the function just returns
 *	    ENOSYS.  The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for the
- *	    timer.  2.) The list, it_lock, it_clock, it_id and it_process
+ *	    timer.  2.) The list, it_lock, it_clock, it_id and it_pid
 *	    fields are not modified by timer code.
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	int shared, ret;
+	struct task_struct *task;
+	int shared, ret = -1;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 *   dequeue_signal()->do_schedule_next_timer().
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
-	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	rcu_read_lock();
+	task = pid_task(timr->it_pid, PIDTYPE_PID);
+	if (task) {
+		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+		ret = send_sigqueue(timr->sigq, task, shared);
+	}
+	rcu_read_unlock();
 	/* If we failed to send the signal the timer stops. */
 	return ret > 0;
 }
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	return ret;
 }
 
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
@@ -425,7 +431,7 @@ static struct pid *good_sigevent(sigevent_t * event)
 	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
 		return NULL;
 
-	return rtn;
+	return task_pid(rtn);
 }
 
 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -464,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 		idr_remove(&posix_timers_id, tmr->it_id);
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
+	put_pid(tmr->it_pid);
 	sigqueue_free(tmr->sigq);
 	kmem_cache_free(posix_timers_cache, tmr);
 }
@@ -477,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
 {
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
-	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
@@ -531,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
 			goto out;
 		}
 		rcu_read_lock();
-		process = good_sigevent(&event);
-		if (process)
-			get_task_struct(process);
+		new_timer->it_pid = get_pid(good_sigevent(&event));
 		rcu_read_unlock();
-		if (!process) {
+		if (!new_timer->it_pid) {
 			error = -EINVAL;
 			goto out;
 		}
@@ -543,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
 		event.sigev_notify = SIGEV_SIGNAL;
 		event.sigev_signo = SIGALRM;
 		event.sigev_value.sival_int = new_timer->it_id;
-		process = current->group_leader;
-		get_task_struct(process);
+		new_timer->it_pid = get_pid(task_tgid(current));
 	}
@@ -554,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
 	new_timer->sigq->info.si_code  = SI_TIMER;
 
 	spin_lock_irq(&current->sighand->siglock);
-	new_timer->it_process = process;
+	new_timer->it_signal = current->signal;
 	list_add(&new_timer->list, &current->signal->posix_timers);
 	spin_unlock_irq(&current->sighand->siglock);
@@ -589,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-		if (timr->it_process &&
-		    same_thread_group(timr->it_process, current)) {
+		if (timr->it_signal == current->signal) {
 			spin_unlock(&idr_lock);
 			return timr;
 		}
@@ -837,8 +839,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
@@ -864,8 +865,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
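sys_timer_create() above is the kernel half of timer_create(2); after this series the kernel keeps a struct pid reference (it_pid) instead of pinning a task_struct, and validates later operations with the it_signal == current->signal check in lock_timer(). A self-contained userspace sketch that drives this path (link with -lrt on older glibc):

	#include <signal.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static void on_alarm(int sig)
	{
		/* no-op; just interrupts pause() */
	}

	int main(void)
	{
		timer_t tid;
		struct sigevent sev = {
			.sigev_notify = SIGEV_SIGNAL,	/* good_sigevent() picks the group leader's pid */
			.sigev_signo  = SIGALRM,
		};
		struct itimerspec its = {
			.it_value = { .tv_sec = 1 },	/* one shot, 1s from now */
		};

		signal(SIGALRM, on_alarm);
		if (timer_create(CLOCK_MONOTONIC, &sev, &tid) != 0) {
			perror("timer_create");
			return 1;
		}
		timer_settime(tid, 0, &its, NULL);
		pause();		/* delivery goes through posix_timer_event() */
		timer_delete(tid);
		return 0;
	}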
kernel/sched.c
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	rt_b->rt_period_timer.function = sched_rt_period_timer;
-	rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 }
 
 static inline int rt_bandwidth_enabled(void)
@@ -1139,7 +1138,6 @@ static void init_rq_hrtick(struct rq *rq)
 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	rq->hrtick_timer.function = hrtick;
-	rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 }
 
 #else	/* CONFIG_SCHED_HRTICK */
 static inline void hrtick_clear(struct rq *rq)
kernel/time/ntp.c
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
 	enum hrtimer_restart res = HRTIMER_NORESTART;
 
-	write_seqlock_irq(&xtime_lock);
+	write_seqlock(&xtime_lock);
 
 	switch (time_state) {
 	case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 	}
 	update_vsyscall(&xtime, clock);
 
-	write_sequnlock_irq(&xtime_lock);
+	write_sequnlock(&xtime_lock);
 
 	return res;
 }
kernel/time/tick-sched.c
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	if (need_resched())
 		goto end;
 
-	if (unlikely(local_softirq_pending())) {
+	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
 		static int ratelimit;
 
 		if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
 	/* Schedule the tick, if we are at least one jiffie off */
 	if ((long)delta_jiffies >= 1) {
 
+		/*
+		 * calculate the expiry time for the next timer wheel
+		 * timer
+		 */
+		expires = ktime_add_ns(last_update, tick_period.tv64 *
+				       delta_jiffies);
+
+		/*
+		 * If this cpu is the one which updates jiffies, then
+		 * give up the assignment and let it be taken by the
+		 * cpu which runs the tick timer next, which might be
+		 * this cpu as well. If we don't drop this here the
+		 * jiffies might be stale and do_timer() never
+		 * invoked.
+		 */
+		if (cpu == tick_do_timer_cpu)
+			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+
 		if (delta_jiffies > 1)
 			cpu_set(cpu, nohz_cpu_mask);
+
+		/* Skip reprogram of event if its not changed */
+		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
+			goto out;
+
 		/*
 		 * nohz_stop_sched_tick can be called several times before
 		 * the nohz_restart_sched_tick is called. This happens when
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
 			rcu_enter_nohz();
 		}
 
-		/*
-		 * If this cpu is the one which updates jiffies, then
-		 * give up the assignment and let it be taken by the
-		 * cpu which runs the tick timer next, which might be
-		 * this cpu as well. If we don't drop this here the
-		 * jiffies might be stale and do_timer() never
-		 * invoked.
-		 */
-		if (cpu == tick_do_timer_cpu)
-			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-
 		ts->idle_sleeps++;
 
 		/*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			goto out;
 		}
 
-		/*
-		 * calculate the expiry time for the next timer wheel
-		 * timer
-		 */
-		expires = ktime_add_ns(last_update, tick_period.tv64 *
-				       delta_jiffies);
+		/* Mark expiries */
 		ts->idle_expires = expires;
 
 		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
 	 */
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
-	ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
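The hoisted expiry calculation above exists so the new guard can compare the next expiry against dev->next_event and skip reprogramming the clockevent when nothing changed. The same idea in a hedged userspace analogue — rearm() and programmed are illustrative names, not kernel code:

	#include <string.h>
	#include <sys/timerfd.h>

	static struct itimerspec programmed;	/* last value handed to the kernel */

	static int rearm(int tfd, const struct itimerspec *want)
	{
		/* skip the syscall entirely if the requested expiry is unchanged */
		if (memcmp(want, &programmed, sizeof(*want)) == 0)
			return 0;
		if (timerfd_settime(tfd, TFD_TIMER_ABSTIME, want, NULL) != 0)
			return -1;
		programmed = *want;
		return 0;
	}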
kernel/trace/trace_sysprof.c
@@ -202,7 +202,6 @@ static void start_stack_timer(int cpu)
 
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = stack_trace_timer_fn;
-	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
 	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
 }
sound/core/hrtimer.c
@@ -57,7 +57,6 @@ static int snd_hrtimer_open(struct snd_timer *t)
 		return -ENOMEM;
 	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	stime->timer = t;
-	stime->hrt.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 	stime->hrt.function = snd_hrtimer_callback;
 	t->private_data = stime;
 	return 0;
sound/drivers/pcsp/pcsp.c
@@ -96,7 +96,6 @@ static int __devinit snd_card_pcsp_probe(int devnum, struct device *dev)
 		return -EINVAL;
 
 	hrtimer_init(&pcsp_chip.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	pcsp_chip.timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 	pcsp_chip.timer.function = pcsp_do_timer;
 
 	card = snd_card_new(index, id, THIS_MODULE, 0);