Commit f8e08a84 authored by Al Viro, committed by Linus Torvalds

[PATCH] reiserfs endianness: sanitize reiserfs_key union

Since we only access reiserfs_key ->u.k_offset_v2 guts in four helper
functions, we are free to sanitize those, as long as

- layout of the structure is unchanged (it's on-disk object)

- behaviour of these helpers is the same as before.

Patch kills the mess with endianness-dependent bitfields and replaces them
with a single __le64.  Helpers are switched to straightforward shift/and/or.

Benefits:

- exact same definitions for little- and big-endian architectures; no ifdefs
  in sight.

- generates the same code on little-endian and improved code on big-endian.

- doesn't rely on lousy bitfield handling in the gcc code generator.

- happens to be standard C (unsigned long long is not a valid type for a
  bitfield; it's a gccism, and not a well-implemented one at that).
Signed-off-by: Al Viro <viro@parcelfarce.linux.theplanet.co.uk>
Cc: <reiserfs-dev@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 6b9f5829
@@ -381,57 +381,29 @@ struct offset_v1 {
 } __attribute__ ((__packed__));

 struct offset_v2 {
-#ifdef __LITTLE_ENDIAN
-	/* little endian version */
-	__u64 k_offset:60;
-	__u64 k_type: 4;
-#else
-	/* big endian version */
-	__u64 k_type: 4;
-	__u64 k_offset:60;
-#endif
+	__le64 v;
 } __attribute__ ((__packed__));

-#ifndef __LITTLE_ENDIAN
-typedef union {
-	struct offset_v2 offset_v2;
-	__u64 linear;
-} __attribute__ ((__packed__)) offset_v2_esafe_overlay;
-
 static inline __u16 offset_v2_k_type( const struct offset_v2 *v2 )
 {
-	offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
-	tmp.linear = le64_to_cpu( tmp.linear );
-	return (tmp.offset_v2.k_type <= TYPE_MAXTYPE)?tmp.offset_v2.k_type:TYPE_ANY;
+	__u8 type = le64_to_cpu(v2->v) >> 60;
+	return (type <= TYPE_MAXTYPE)?type:TYPE_ANY;
 }

 static inline void set_offset_v2_k_type( struct offset_v2 *v2, int type )
 {
-	offset_v2_esafe_overlay *tmp = (offset_v2_esafe_overlay *)v2;
-	tmp->linear = le64_to_cpu(tmp->linear);
-	tmp->offset_v2.k_type = type;
-	tmp->linear = cpu_to_le64(tmp->linear);
+	v2->v = (v2->v & cpu_to_le64(~0ULL>>4)) | cpu_to_le64((__u64)type<<60);
 }

 static inline loff_t offset_v2_k_offset( const struct offset_v2 *v2 )
 {
-	offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
-	tmp.linear = le64_to_cpu( tmp.linear );
-	return tmp.offset_v2.k_offset;
+	return le64_to_cpu(v2->v) & (~0ULL>>4);
 }

 static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset ){
-	offset_v2_esafe_overlay *tmp = (offset_v2_esafe_overlay *)v2;
-	tmp->linear = le64_to_cpu(tmp->linear);
-	tmp->offset_v2.k_offset = offset;
-	tmp->linear = cpu_to_le64(tmp->linear);
+	offset &= (~0ULL>>4);
+	v2->v = (v2->v & cpu_to_le64(15ULL<<60)) | cpu_to_le64(offset);
 }
-
-#else
-# define offset_v2_k_type(v2)           ((v2)->k_type)
-# define set_offset_v2_k_type(v2,val)   (offset_v2_k_type(v2) = (val))
-# define offset_v2_k_offset(v2)         ((v2)->k_offset)
-# define set_offset_v2_k_offset(v2,val) (offset_v2_k_offset(v2) = (val))
-#endif

 /* Key of an item determines its location in the S+tree, and
    is composed of 4 components */
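For illustration only, here is a minimal standalone userspace sketch of the packing scheme the new helpers implement: one 64-bit value whose low 60 bits hold the offset and whose top 4 bits hold the type. The helper names (pack_v2, v2_type, v2_offset) and the test values are made up for this sketch; the real kernel helpers additionally pass the value through cpu_to_le64()/le64_to_cpu(), since the on-disk field is little-endian.

#include <stdio.h>
#include <stdint.h>

#define OFFSET_MASK (~0ULL >> 4)	/* low 60 bits hold the offset */

static uint64_t pack_v2(uint64_t offset, unsigned int type)
{
	/* type goes in the top 4 bits, offset in the low 60 */
	return (offset & OFFSET_MASK) | ((uint64_t)type << 60);
}

static unsigned int v2_type(uint64_t v)
{
	return v >> 60;
}

static uint64_t v2_offset(uint64_t v)
{
	return v & OFFSET_MASK;
}

int main(void)
{
	uint64_t v = pack_v2(123456789ULL, 3);

	printf("type=%u offset=%llu\n", v2_type(v),
	       (unsigned long long)v2_offset(v));
	return 0;
}

The point of the shift/and/or form is that the same arithmetic works regardless of host byte order; only the single cpu_to_le64()/le64_to_cpu() conversion depends on the architecture, so no #ifdef is needed.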