Commit ec12fa5c authored by Randy Dunlap, committed by Ingo Molnar

x86: bitops_32.h style cleanups

Coding style cleanups in x86/bitops_32.h:

- drop space in "* addr"
- whitespace & indentation fixes
- spello fixes
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c9cce83d
@@ -37,7 +37,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(int nr, volatile unsigned long * addr)
+static inline void set_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btsl %1,%0"
@@ -54,7 +54,7 @@ static inline void set_bit(int nr, volatile unsigned long * addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long * addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__(
 		"btsl %1,%0"
@@ -72,7 +72,7 @@ static inline void __set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long * addr)
+static inline void clear_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btrl %1,%0"
@@ -94,7 +94,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
 	clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(int nr, volatile unsigned long * addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__(
 		"btrl %1,%0"
@@ -132,7 +132,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long * addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__(
 		"btcl %1,%0"
@@ -150,7 +150,7 @@ static inline void __change_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile unsigned long * addr)
+static inline void change_bit(int nr, volatile unsigned long *addr)
 {
 	__asm__ __volatile__( LOCK_PREFIX
 		"btcl %1,%0"
@@ -167,7 +167,7 @@ static inline void change_bit(int nr, volatile unsigned long * addr)
  * It may be reordered on other architectures than x86.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
@@ -199,7 +199,7 @@ static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
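The race warned about above is the whole difference between __test_and_set_bit() and its locked counterpart: without the lock prefix, two CPUs can read-modify-write the same word and one update can be lost. A hedged sketch of the caller-provided locking the comment asks for; the lock, bitmap, and helper names are invented for illustration:

	/* Illustrative only: slot_lock, slot_bitmap and claim_slot() are invented. */
	static DEFINE_SPINLOCK(slot_lock);
	static unsigned long slot_bitmap[4];

	static int claim_slot(int nr)
	{
		int was_set;

		spin_lock(&slot_lock);
		/* The non-atomic variant is safe here: the spinlock serializes callers. */
		was_set = __test_and_set_bit(nr, slot_bitmap);
		spin_unlock(&slot_lock);

		return was_set;
	}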
@@ -219,7 +219,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
  * It can be reorderdered on other architectures other than x86.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
@@ -270,7 +270,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
@@ -287,7 +287,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile void * addr);
+static int test_bit(int nr, const volatile void *addr);
 #endif
 
 static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
@@ -295,7 +295,7 @@ static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
 	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
 }
 
-static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
+static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
 {
 	int oldbit;
@@ -306,10 +306,10 @@ static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
 	return oldbit;
 }
 
-#define test_bit(nr,addr) \
-	(__builtin_constant_p(nr) ? \
-	 constant_test_bit((nr),(addr)) : \
-	 variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) \
+	(__builtin_constant_p(nr) ? \
+	 constant_test_bit((nr), (addr)) : \
+	 variable_test_bit((nr), (addr)))
 
 #undef ADDR
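The reformatted test_bit() macro is also the dispatch point: __builtin_constant_p(nr) is true when the bit number is a compile-time constant, so the pure-C constant_test_bit() above can be folded away, while a runtime nr goes through variable_test_bit() and its btl instruction. A stand-alone, user-space approximation of the constant path (all names here are illustrative, not the kernel's):

	#include <stdio.h>

	/* Sketch of constant_test_bit()'s indexing: word index is nr >> 5,
	 * bit position inside that 32-bit word is nr & 31.
	 */
	static int demo_test_bit(int nr, const unsigned long *addr)
	{
		return ((1UL << (nr & 31)) & addr[nr >> 5]) != 0;
	}

	int main(void)
	{
		unsigned long bitmap[4] = { 0 };	/* at least 128 bits */

		bitmap[100 >> 5] |= 1UL << (100 & 31);	/* non-atomic "set bit 100" */
		printf("bit 100 = %d, bit 99 = %d\n",
		       demo_test_bit(100, bitmap), demo_test_bit(99, bitmap));
		return 0;
	}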
@@ -318,7 +318,7 @@ static inline int variable_test_bit(int nr, const volatile unsigned long * addr)
  * @addr: The address to start the search at
  * @size: The maximum size to search
  *
- * Returns the bit-number of the first zero bit, not the number of the byte
+ * Returns the bit number of the first zero bit, not the number of the byte
  * containing a bit.
  */
 static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
@@ -348,7 +348,7 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
 /**
  * find_next_zero_bit - find the first zero bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
+ * @offset: The bit number to start searching at
  * @size: The maximum size to search
  */
 int find_next_zero_bit(const unsigned long *addr, int size, int offset);
@@ -372,7 +372,7 @@ static inline unsigned long __ffs(unsigned long word)
  * @addr: The address to start the search at
  * @size: The maximum size to search
  *
- * Returns the bit-number of the first set bit, not the number of the byte
+ * Returns the bit number of the first set bit, not the number of the byte
  * containing a bit.
  */
 static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
@@ -391,7 +391,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
 /**
  * find_next_bit - find the first set bit in a memory region
  * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
+ * @offset: The bit number to start searching at
  * @size: The maximum size to search
  */
 int find_next_bit(const unsigned long *addr, int size, int offset);
@@ -460,10 +460,10 @@ static inline int fls(int x)
 #include <asm-generic/bitops/ext2-non-atomic.h>
-#define ext2_set_bit_atomic(lock,nr,addr) \
-		test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock,nr, addr) \
-		test_and_clear_bit((nr),(unsigned long*)addr)
+#define ext2_set_bit_atomic(lock, nr, addr) \
+	test_and_set_bit((nr), (unsigned long *)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+	test_and_clear_bit((nr), (unsigned long *)addr)
 #include <asm-generic/bitops/minix.h>
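Worth noting while reading the reformatted macros: the lock argument never appears in the expansion. On x86, test_and_set_bit()/test_and_clear_bit() are already atomic, so the per-bitmap lock that a non-atomic implementation would take is simply ignored. A hypothetical expansion, with sb_lock and bitmap as invented names:

	/* Hypothetical call site (sb_lock and bitmap are illustrative names): */
	int was_set = ext2_set_bit_atomic(sb_lock, nr, bitmap);

	/* ...which the preprocessor turns into, per the macro above: */
	int was_set_expanded = test_and_set_bit((nr), (unsigned long *)bitmap);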