Commit 0db125c4 authored by Vegard Nossum, committed by Ingo Molnar

x86: more header fixes

Summary: Add missing include guards for some x86 headers.

This has only had the most rudimentary testing, but is hopefully obviously
correct.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e6e07d8a
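
For context, the change applies the standard C include-guard pattern to headers that were missing it (or, in the seccomp hunk, had an #ifndef with no matching #define). The sketch below is illustrative only; the file name, guard macro, and declaration are made-up, not taken from the patch:

    /*
     * example.h -- hypothetical header, shown only to illustrate the
     * include-guard pattern this patch adds.
     */
    #ifndef ASM_X86__EXAMPLE_H
    #define ASM_X86__EXAMPLE_H

    /* Declarations go here; the guard makes a second #include of this
     * file expand to nothing, avoiding redefinition errors. */
    extern int example_nr_widgets;

    #endif /* ASM_X86__EXAMPLE_H */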
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 #include <linux/thread_info.h>
...
@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel <mochel@osdl.org>
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
...
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do { \
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
...
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do { \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */
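
A side note on the first hunk above: testing a guard macro with #ifndef but never defining it leaves the guard ineffective, since the macro is still undefined the next time the header is included and the body is expanded again. A minimal sketch of that failure mode, using hypothetical names rather than the real seccomp header:

    /* broken.h -- hypothetical header with the pre-patch bug */
    #ifndef BROKEN_GUARD_H
    /* missing: #define BROKEN_GUARD_H */

    /* Everything here is re-expanded on every #include; a struct or
     * inline function would trigger a redefinition error the second time. */
    struct broken_example {
    	int field;
    };

    #endif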