Commit 0b79dada authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-fixes:
  sched: fix share (re)distribution
  softlockup: fix NOHZ wakeup
  seqlock: livelock fix
parents 50be4917 3f5087a2
@@ -85,23 +85,29 @@ static inline int write_tryseqlock(seqlock_t *sl)
 /* Start of read calculation -- fetch last complete writer token */
 static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
 {
-        unsigned ret = sl->sequence;
+        unsigned ret;
+
+repeat:
+        ret = sl->sequence;
         smp_rmb();
+        if (unlikely(ret & 1)) {
+                cpu_relax();
+                goto repeat;
+        }
+
         return ret;
 }
 
-/* Test if reader processed invalid data.
- * If initial values is odd,
- * then writer had already started when section was entered
- * If sequence value changed
- * then writer changed data while in section
- *
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data.
+ *
+ * If sequence value changed then writer changed data while in section.
  */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
 {
         smp_rmb();
-        return (iv & 1) | (sl->sequence ^ iv);
+
+        return (sl->sequence != start);
 }
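
The hunk above is the "seqlock: livelock fix" listed in the merge: read_seqbegin() now spins (with cpu_relax()) until the sequence count is even, so a reader never leaves the begin helper while a writer is mid-update, and read_seqretry() shrinks to a plain "did the count change" test. Callers are unaffected; the read side is still the usual begin/retry loop. A minimal sketch of that usage pattern (the foo_* names and the value they guard are invented for illustration, not part of this commit):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(foo_lock);
static u64 foo_value;

/* Reader: loop until a snapshot is taken with no writer in between. */
static u64 foo_read(void)
{
        unsigned seq;
        u64 val;

        do {
                seq = read_seqbegin(&foo_lock);         /* now spins while the count is odd */
                val = foo_value;
        } while (read_seqretry(&foo_lock, seq));        /* retry if a writer raced with us */

        return val;
}

/* Writer: write_seqlock() takes the lock and makes the count odd,
 * write_sequnlock() makes it even again and releases the lock. */
static void foo_write(u64 val)
{
        write_seqlock(&foo_lock);
        foo_value = val;
        write_sequnlock(&foo_lock);
}

The writer keeps the count odd for the whole critical section, which is exactly the state the new read_seqbegin() now waits out instead of bouncing through read_seqretry().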
@@ -122,20 +128,26 @@ typedef struct seqcount {
 /* Start of read using pointer to a sequence counter only.  */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-        unsigned ret = s->sequence;
+        unsigned ret;
+
+repeat:
+        ret = s->sequence;
         smp_rmb();
+        if (unlikely(ret & 1)) {
+                cpu_relax();
+                goto repeat;
+        }
         return ret;
 }
 
-/* Test if reader processed invalid data.
- * Equivalent to: iv is odd or sequence number has changed.
- *                (iv & 1) || (*s != iv)
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data because sequence number has changed.
  */
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
         smp_rmb();
-        return (iv & 1) | (s->sequence ^ iv);
+
+        return s->sequence != start;
 }
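
The second hunk makes the same change to the bare seqcount_t helpers, which carry no spinlock of their own: writers must be serialized by the caller and bracket their updates with write_seqcount_begin()/write_seqcount_end() from the same header. A small sketch of that pairing (the stats_* names are invented for illustration; the lock that serializes writers is assumed to exist elsewhere):

#include <linux/seqlock.h>

static seqcount_t stats_seq;    /* static storage zero-initializes the count; seqcount_init() does the same */
static unsigned long stats_packets, stats_bytes;

/* Writer: caller must already hold whatever lock serializes writers. */
static void stats_update(unsigned long packets, unsigned long bytes)
{
        write_seqcount_begin(&stats_seq);       /* sequence becomes odd */
        stats_packets = packets;
        stats_bytes = bytes;
        write_seqcount_end(&stats_seq);         /* sequence becomes even again */
}

/* Reader: lockless, consistent snapshot of both counters. */
static void stats_snapshot(unsigned long *packets, unsigned long *bytes)
{
        unsigned seq;

        do {
                seq = read_seqcount_begin(&stats_seq);  /* spins while a writer is active */
                *packets = stats_packets;
                *bytes = stats_bytes;
        } while (read_seqcount_retry(&stats_seq, seq));
}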
@@ -1656,42 +1656,6 @@ void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd)
         aggregate(tg, sd)->task_weight = task_weight;
 }
 
-/*
- * Redistribute tg->shares amongst all tg->cfs_rq[]s.
- */
-static void __aggregate_redistribute_shares(struct task_group *tg)
-{
-        int i, max_cpu = smp_processor_id();
-        unsigned long rq_weight = 0;
-        unsigned long shares, max_shares = 0, shares_rem = tg->shares;
-
-        for_each_possible_cpu(i)
-                rq_weight += tg->cfs_rq[i]->load.weight;
-
-        for_each_possible_cpu(i) {
-                /*
-                 * divide shares proportional to the rq_weights.
-                 */
-                shares = tg->shares * tg->cfs_rq[i]->load.weight;
-                shares /= rq_weight + 1;
-
-                tg->cfs_rq[i]->shares = shares;
-
-                if (shares > max_shares) {
-                        max_shares = shares;
-                        max_cpu = i;
-                }
-                shares_rem -= shares;
-        }
-
-        /*
-         * Ensure it all adds up to tg->shares; we can loose a few
-         * due to rounding down when computing the per-cpu shares.
-         */
-        if (shares_rem)
-                tg->cfs_rq[max_cpu]->shares += shares_rem;
-}
-
 /*
  * Compute the weight of this group on the given cpus.
  */
@@ -1701,18 +1665,11 @@ void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd)
         unsigned long shares = 0;
         int i;
 
-again:
         for_each_cpu_mask(i, sd->span)
                 shares += tg->cfs_rq[i]->shares;
 
-        /*
-         * When the span doesn't have any shares assigned, but does have
-         * tasks to run do a machine wide rebalance (should be rare).
-         */
-        if (unlikely(!shares && aggregate(tg, sd)->rq_weight)) {
-                __aggregate_redistribute_shares(tg);
-                goto again;
-        }
+        if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares)
+                shares = tg->shares;
 
         aggregate(tg, sd)->shares = shares;
 }
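
Together, the two scheduler hunks above implement "sched: fix share (re)distribution": instead of retrying a machine-wide __aggregate_redistribute_shares() pass whenever the span has runnable weight but no shares yet, aggregate_group_shares() now simply clamps the summed per-cpu shares to the group's configured total. A standalone sketch of that aggregation rule, with the scheduler structures flattened into plain parameters (all names here are invented for illustration):

/*
 * per_cpu_shares[] stands in for tg->cfs_rq[i]->shares across the cpus in
 * the domain span, rq_weight for aggregate(tg, sd)->rq_weight, and
 * tg_shares for tg->shares.
 */
static unsigned long aggregate_shares_sketch(const unsigned long *per_cpu_shares,
                int nr_cpus, unsigned long rq_weight, unsigned long tg_shares)
{
        unsigned long shares = 0;
        int i;

        for (i = 0; i < nr_cpus; i++)
                shares += per_cpu_shares[i];

        /*
         * No shares assigned yet even though there is runnable weight, or
         * rounding pushed the sum past the configured total: fall back to
         * the group's full allotment instead of rebalancing and retrying.
         */
        if ((!shares && rq_weight) || shares > tg_shares)
                shares = tg_shares;

        return shares;
}

This bounds the aggregate to the group's share allotment in a single comparison, without the retry loop that the removed __aggregate_redistribute_shares() required.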
@@ -393,6 +393,7 @@ void tick_nohz_restart_sched_tick(void)
                 sub_preempt_count(HARDIRQ_OFFSET);
         }
 
+        touch_softlockup_watchdog();
         /*
          * Cancel the scheduled timer and restore the tick
          */