Commit 3cb4f9fa authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Linus Torvalds

lib: percpu_counter_sub

Hugh spotted that some code does:
  percpu_counter_add(&counter, -unsignedlong)

which, when the amount argument is of type s32, sort-of works thanks to
two's-complement. However, when we change the type to s64, this breaks on 32-bit
machines, because the integer promotion rules zero-extend the unsigned number.

Provide percpu_counter_sub() to hide the s64 cast. That is:
  percpu_counter_sub(&counter, foo)
is equal to:
  percpu_counter_add(&counter, -(s64)foo);
Signed-off-by: default avatarPeter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent aa0dff2d
...@@ -124,7 +124,7 @@ static int reserve_blocks(struct super_block *sb, int count) ...@@ -124,7 +124,7 @@ static int reserve_blocks(struct super_block *sb, int count)
return 0; return 0;
} }
percpu_counter_add(&sbi->s_freeblocks_counter, -count); percpu_counter_sub(&sbi->s_freeblocks_counter, count);
sb->s_dirt = 1; sb->s_dirt = 1;
return count; return count;
} }
......
...@@ -1633,7 +1633,7 @@ allocated: ...@@ -1633,7 +1633,7 @@ allocated:
gdp->bg_free_blocks_count = gdp->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
spin_unlock(sb_bgl_lock(sbi, group_no)); spin_unlock(sb_bgl_lock(sbi, group_no));
percpu_counter_add(&sbi->s_freeblocks_counter, -num); percpu_counter_sub(&sbi->s_freeblocks_counter, num);
BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
err = ext3_journal_dirty_metadata(handle, gdp_bh); err = ext3_journal_dirty_metadata(handle, gdp_bh);
......
...@@ -1647,7 +1647,7 @@ allocated: ...@@ -1647,7 +1647,7 @@ allocated:
gdp->bg_free_blocks_count = gdp->bg_free_blocks_count =
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num); cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
spin_unlock(sb_bgl_lock(sbi, group_no)); spin_unlock(sb_bgl_lock(sbi, group_no));
percpu_counter_add(&sbi->s_freeblocks_counter, -num); percpu_counter_sub(&sbi->s_freeblocks_counter, num);
BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
err = ext4_journal_dirty_metadata(handle, gdp_bh); err = ext4_journal_dirty_metadata(handle, gdp_bh);
......
...@@ -105,4 +105,9 @@ static inline void percpu_counter_dec(struct percpu_counter *fbc) ...@@ -105,4 +105,9 @@ static inline void percpu_counter_dec(struct percpu_counter *fbc)
percpu_counter_add(fbc, -1); percpu_counter_add(fbc, -1);
} }
/*
 * Subtract @amount from the counter.
 *
 * Equivalent to percpu_counter_add(fbc, -(s64)amount): taking the
 * parameter as s64 ensures an unsigned caller-side value is negated
 * in the wide signed type rather than zero-extended first.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	s64 delta = -amount;

	percpu_counter_add(fbc, delta);
}
#endif /* _LINUX_PERCPU_COUNTER_H */ #endif /* _LINUX_PERCPU_COUNTER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment