Commit b53e675d authored by Christoph Hellwig, committed by Lachlan McIlroy

[XFS] xlog_rec_header/xlog_rec_ext_header endianness annotations

Mostly trivial conversion with one exception: h_num_logops was previously kept in
native endianness and only converted to big endian in xlog_sync, but we now keep
it big endian at all times. With the fast byte-swap instructions of today's CPUs
that's not an issue, and the new variant keeps the code clean and maintainable.

SGI-PV: 971186
SGI-Modid: xfs-linux-melb:xfs-kern:29821a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 67fcb7bf
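
To illustrate the h_num_logops change described above: before this patch the in-memory
copy of the field was native-endian and byte-swapped once in xlog_sync(); afterwards the
field is big endian everywhere and every update converts explicitly. Below is a minimal
userspace sketch of that discipline, with hypothetical mini_* names and htonl()/ntohl()
standing in for the kernel's cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

/* Hypothetical miniature of the log record header: the field is
 * kept big-endian at all times, as the patch does for h_num_logops. */
struct mini_rec_header {
	uint32_t h_num_logops;  /* big-endian, even while in memory */
};

/* New-style update: read back to native order, adjust, store big-endian.
 * Same shape as be32_add(&iclog->ic_header.h_num_logops, record_cnt). */
static void mini_add_logops(struct mini_rec_header *h, int record_cnt)
{
	h->h_num_logops = htonl(ntohl(h->h_num_logops) + record_cnt);
}

int main(void)
{
	struct mini_rec_header h = { .h_num_logops = htonl(0) };

	mini_add_logops(&h, 3);
	/* Reads also convert explicitly; the raw value is never used directly. */
	printf("logops = %u\n", ntohl(h.h_num_logops));  /* prints 3 */
	return 0;
}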
@@ -1227,12 +1227,12 @@ xlog_alloc_log(xfs_mount_t *mp,
 		head = &iclog->ic_header;
 		memset(head, 0, sizeof(xlog_rec_header_t));
-		INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
-		INT_SET(head->h_version, ARCH_CONVERT,
+		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
+		head->h_version = cpu_to_be32(
 			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
-		INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
+		head->h_size = cpu_to_be32(log->l_iclog_size);
 		/* new fields */
-		INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
+		head->h_fmt = cpu_to_be32(XLOG_FMT);
 		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
@@ -1378,7 +1378,7 @@ xlog_sync(xlog_t *log,
 {
 	xfs_caddr_t	dptr;		/* pointer to byte sized element */
 	xfs_buf_t	*bp;
-	int		i, ops;
+	int		i;
 	uint		count;		/* byte count of bwrite */
 	uint		count_init;	/* initial count before roundup */
 	int		roundoff;	/* roundoff to BB or stripe */
@@ -1417,21 +1417,17 @@ xlog_sync(xlog_t *log,
 	/* real byte length */
 	if (v2) {
-		INT_SET(iclog->ic_header.h_len,
-			ARCH_CONVERT,
-			iclog->ic_offset + roundoff);
+		iclog->ic_header.h_len =
+			cpu_to_be32(iclog->ic_offset + roundoff);
 	} else {
-		INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+		iclog->ic_header.h_len =
+			cpu_to_be32(iclog->ic_offset);
 	}
 
-	/* put ops count in correct order */
-	ops = iclog->ic_header.h_num_logops;
-	INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
-
 	bp = iclog->ic_bp;
 	ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
 	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
-	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
+	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
 
 	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
@@ -1494,10 +1490,10 @@ xlog_sync(xlog_t *log,
 		 * a new cycle.  Watch out for the header magic number
 		 * case, though.
 		 */
-		for (i=0; i<split; i += BBSIZE) {
-			INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
-			if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
-				INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
+		for (i = 0; i < split; i += BBSIZE) {
+			be32_add((__be32 *)dptr, 1);
+			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
+				be32_add((__be32 *)dptr, 1);
 			dptr += BBSIZE;
 		}
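
The stamping loop above relies on be32_add() to bump a big-endian word in place. For
reference, a sketch of what that helper presumably reduces to (the XFS-local helper of
this era; mainline later renamed it be32_add_cpu()):

/* Sketch, assuming kernel __be32 types: adjust a big-endian
 * value in place without ever leaving it in native byte order. */
static inline void be32_add(__be32 *a, __s32 b)
{
	*a = cpu_to_be32(be32_to_cpu(*a) + b);
}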
@@ -1586,7 +1582,7 @@ xlog_state_finish_copy(xlog_t *log,
 {
 	spin_lock(&log->l_icloglock);
-	iclog->ic_header.h_num_logops += record_cnt;
+	be32_add(&iclog->ic_header.h_num_logops, record_cnt);
 	iclog->ic_offset += copy_bytes;
 	spin_unlock(&log->l_icloglock);
@@ -1813,7 +1809,7 @@ xlog_write(xfs_mount_t * mp,
 		/* start_lsn is the first lsn written to. That's all we need. */
 		if (! *start_lsn)
-			*start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+			*start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 
 		/* This loop writes out as many regions as can fit in the amount
 		 * of space which was allocated by xlog_state_get_iclog_space().
@@ -1983,7 +1979,8 @@ xlog_state_clean_log(xlog_t *log)
 			 * We don't need to cover the dummy.
 			 */
 			if (!changed &&
-			   (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
+			   (be32_to_cpu(iclog->ic_header.h_num_logops) ==
+					XLOG_COVER_OPS)) {
 				changed = 1;
 			} else {
 				/*
@@ -2051,7 +2048,7 @@ xlog_get_lowest_lsn(
 	lowest_lsn = 0;
 	do {
 	    if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
-		lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
+		lsn = be64_to_cpu(lsn_log->ic_header.h_lsn);
 		if ((lsn && !lowest_lsn) ||
 		    (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
 			lowest_lsn = lsn;
@@ -2152,11 +2149,9 @@ xlog_state_do_callback(
 				 */
 				lowest_lsn = xlog_get_lowest_lsn(log);
-				if (lowest_lsn && (
-					XFS_LSN_CMP(
-						lowest_lsn,
-						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
-					)<0)) {
+				if (lowest_lsn &&
+				    XFS_LSN_CMP(lowest_lsn,
+						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 					iclog = iclog->ic_next;
 					continue;	/* Leave this iclog for
 							 * another thread */
@@ -2171,11 +2166,10 @@ xlog_state_do_callback(
 				 * No one else can be here except us.
 				 */
 				spin_lock(&log->l_grant_lock);
-				ASSERT(XFS_LSN_CMP(
-						log->l_last_sync_lsn,
-						INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
-					)<=0);
-				log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
+				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+				log->l_last_sync_lsn =
+					be64_to_cpu(iclog->ic_header.h_lsn);
 				spin_unlock(&log->l_grant_lock);
 
 				/*
@@ -2392,8 +2386,8 @@ restart:
 		xlog_tic_add_region(ticket,
 				    log->l_iclog_hsize,
 				    XLOG_REG_TYPE_LRHEADER);
-		INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
-		INT_SET(head->h_lsn, ARCH_CONVERT,
+		head->h_cycle = cpu_to_be32(log->l_curr_cycle);
+		head->h_lsn = cpu_to_be64(
 			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
 		ASSERT(log->l_curr_block >= 0);
 	}
@@ -2823,7 +2817,7 @@ xlog_state_release_iclog(xlog_t *log,
 	    iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
 		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
@@ -2861,7 +2855,7 @@ xlog_state_switch_iclogs(xlog_t *log,
 	if (!eventual_size)
 		eventual_size = iclog->ic_offset;
 	iclog->ic_state = XLOG_STATE_WANT_SYNC;
-	INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
+	iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
 	log->l_prev_block = log->l_curr_block;
 	log->l_prev_cycle = log->l_curr_cycle;
@@ -2957,7 +2951,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 			 * the previous sync.
 			 */
 			iclog->ic_refcnt++;
-			lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
+			lsn = be64_to_cpu(iclog->ic_header.h_lsn);
 			xlog_state_switch_iclogs(log, iclog, 0);
 			spin_unlock(&log->l_icloglock);
@@ -2965,7 +2959,7 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
 				return XFS_ERROR(EIO);
 			*log_flushed = 1;
 			spin_lock(&log->l_icloglock);
-			if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
+			if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
 			    iclog->ic_state != XLOG_STATE_DIRTY)
 				goto maybe_sleep;
 			else
@@ -3049,9 +3043,9 @@ try_again:
 	}
 
 	do {
-		if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
+		if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
 			iclog = iclog->ic_next;
 			continue;
 		}
 
 		if (iclog->ic_state == XLOG_STATE_DIRTY) {
@@ -3460,18 +3454,18 @@ xlog_verify_iclog(xlog_t *log,
 	spin_unlock(&log->l_icloglock);
 
 	/* check log magic numbers */
-	ptr = (xfs_caddr_t) &(iclog->ic_header);
-	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
+	if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
 		xlog_panic("xlog_verify_iclog: invalid magic num");
 
-	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
+	ptr = (xfs_caddr_t) &iclog->ic_header;
+	for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
 	     ptr += BBSIZE) {
-		if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
+		if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
 			xlog_panic("xlog_verify_iclog: unexpected magic num");
 	}
 
 	/* check fields */
-	len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
+	len = be32_to_cpu(iclog->ic_header.h_num_logops);
 	ptr = iclog->ic_datap;
 	base_ptr = ptr;
 	ophead = (xlog_op_header_t *)ptr;
@@ -3512,9 +3506,9 @@ xlog_verify_iclog(xlog_t *log,
 			if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
 				j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
 				k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
-				op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
+				op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
 			} else {
-				op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
+				op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
 			}
 		}
 		ptr += sizeof(xlog_op_header_t) + op_len;
...
@@ -22,8 +22,9 @@
 #define CYCLE_LSN(lsn) ((uint)((lsn)>>32))
 #define BLOCK_LSN(lsn) ((uint)(lsn))
+
 /* this is used in a spot where we might otherwise double-endian-flip */
-#define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0])
+#define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0])
 
 #ifdef __KERNEL__
 /*
...
@@ -63,10 +63,10 @@ static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 static inline uint xlog_get_cycle(char *ptr)
 {
-	if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
-		return INT_GET(*((uint *)ptr + 1), ARCH_CONVERT);
+	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
+		return be32_to_cpu(*((__be32 *)ptr + 1));
 	else
-		return INT_GET(*(uint *)ptr, ARCH_CONVERT);
+		return be32_to_cpu(*(__be32 *)ptr);
 }
 
 #define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
@@ -85,9 +85,9 @@ static inline uint xlog_get_cycle(char *ptr)
  *
  * this has endian issues, of course.
  */
-static inline uint xlog_get_client_id(uint i)
+static inline uint xlog_get_client_id(__be32 i)
 {
-	return INT_GET(i, ARCH_CONVERT) >> 24;
+	return be32_to_cpu(i) >> 24;
 }
 
 #define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
@@ -287,25 +287,25 @@ typedef struct xlog_op_header {
 #endif
 
 typedef struct xlog_rec_header {
-	uint	  h_magicno;	/* log record (LR) identifier		:  4 */
-	uint	  h_cycle;	/* write cycle of log			:  4 */
-	int	  h_version;	/* LR version				:  4 */
-	int	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
-	xfs_lsn_t h_lsn;	/* lsn of this LR			:  8 */
-	xfs_lsn_t h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
-	uint	  h_chksum;	/* may not be used; non-zero if used	:  4 */
-	int	  h_prev_block;	/* block number to previous LR		:  4 */
-	int	  h_num_logops;	/* number of log operations in this LR	:  4 */
-	uint	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
+	__be32	  h_magicno;	/* log record (LR) identifier		:  4 */
+	__be32	  h_cycle;	/* write cycle of log			:  4 */
+	__be32	  h_version;	/* LR version				:  4 */
+	__be32	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
+	__be64	  h_lsn;	/* lsn of this LR			:  8 */
+	__be64	  h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
+	__be32	  h_chksum;	/* may not be used; non-zero if used	:  4 */
+	__be32	  h_prev_block;	/* block number to previous LR		:  4 */
+	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
+	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
 	/* new fields */
-	int	  h_fmt;	/* format of log record			:  4 */
+	__be32	  h_fmt;	/* format of log record			:  4 */
 	uuid_t	  h_fs_uuid;	/* uuid of FS				: 16 */
-	int	  h_size;	/* iclog size				:  4 */
+	__be32	  h_size;	/* iclog size				:  4 */
 } xlog_rec_header_t;
 
 typedef struct xlog_rec_ext_header {
-	uint	  xh_cycle;	/* write cycle of log			: 4 */
-	uint	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
+	__be32	  xh_cycle;	/* write cycle of log			: 4 */
+	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /*	: 256 */
 } xlog_rec_ext_header_t;
 
 #ifdef __KERNEL__
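
The payoff of the __be32/__be64 struct annotations above is that sparse can now verify the
conversions mechanically: with endian checking enabled (on kernels of this vintage, e.g.
make C=2 CF=-D__CHECK_ENDIAN__), any direct mix of native and on-disk values draws a
warning. A small hedged sketch of the class of error this catches:

/* Sketch, assuming kernel types: __be32 is a bitwise type to sparse,
 * so assignments must go through an explicit conversion helper. */
#include <linux/types.h>	/* __be32, u32 */
#include <asm/byteorder.h>	/* cpu_to_be32() */

static __be32 pack_len(u32 len)
{
	return cpu_to_be32(len);	/* ok: explicit conversion */
	/* "return len;" would warn: incorrect type in return expression
	 * (different base types) when sparse runs with __CHECK_ENDIAN__. */
}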