Commit ad2fc2cd authored by Vitaly Mayatskikh's avatar Vitaly Mayatskikh Committed by Ingo Molnar

x86: fix copy_user on x86

Switch copy_user_generic_string(), copy_user_generic_unrolled() and
__copy_user_nocache() from custom tail handlers to generic
copy_user_handle_tail().
Signed-off-by: default avatarVitaly Mayatskikh <v.mayatskih@gmail.com>
Acked-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parent 1129585a
This diff is collapsed.
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination/source out of cache for more performance.
*
* Input:
* rdi destination
* rsi source
* rdx count
* rcx zero flag when 1 zero on exception
*
* Output:
* eax uncopied bytes or 0 if successful.
*/
ENTRY(__copy_user_nocache)
CFI_STARTPROC
pushq %rbx
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbx, 0
pushq %rcx /* save zero flag */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rcx, 0
xorl %eax,%eax /* zero for the exception handler */
.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT #ifdef FIX_ALIGNMENT
/* check for bad alignment of destination */ /* check for bad alignment of destination */
movl %edi,%ecx movl %edi,%ecx
andl $7,%ecx andl $7,%ecx
jnz .Lbad_alignment jz 102f /* already aligned */
.Lafter_bad_alignment: subl $8,%ecx
#endif negl %ecx
subl %ecx,%edx
movq %rdx,%rcx 100: movb (%rsi),%al
101: movb %al,(%rdi)
movl $64,%ebx incq %rsi
shrq $6,%rdx incq %rdi
decq %rdx decl %ecx
js .Lhandle_tail jnz 100b
102:
.p2align 4 .section .fixup,"ax"
.Lloop: 103: addl %r8d,%edx /* ecx is zerorest also */
.Ls1: movq (%rsi),%r11 jmp copy_user_handle_tail
.Ls2: movq 1*8(%rsi),%r8 .previous
.Ls3: movq 2*8(%rsi),%r9
.Ls4: movq 3*8(%rsi),%r10
.Ld1: movnti %r11,(%rdi)
.Ld2: movnti %r8,1*8(%rdi)
.Ld3: movnti %r9,2*8(%rdi)
.Ld4: movnti %r10,3*8(%rdi)
.Ls5: movq 4*8(%rsi),%r11
.Ls6: movq 5*8(%rsi),%r8
.Ls7: movq 6*8(%rsi),%r9
.Ls8: movq 7*8(%rsi),%r10
.Ld5: movnti %r11,4*8(%rdi)
.Ld6: movnti %r8,5*8(%rdi)
.Ld7: movnti %r9,6*8(%rdi)
.Ld8: movnti %r10,7*8(%rdi)
dec %rdx .section __ex_table,"a"
.align 8
.quad 100b,103b
.quad 101b,103b
.previous
#endif
.endm
/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination/source out of cache for more performance.
*/
ENTRY(__copy_user_nocache)
CFI_STARTPROC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
jz 17f
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
4: movq 3*8(%rsi),%r11
5: movnti %r8,(%rdi)
6: movnti %r9,1*8(%rdi)
7: movnti %r10,2*8(%rdi)
8: movnti %r11,3*8(%rdi)
9: movq 4*8(%rsi),%r8
10: movq 5*8(%rsi),%r9
11: movq 6*8(%rsi),%r10
12: movq 7*8(%rsi),%r11
13: movnti %r8,4*8(%rdi)
14: movnti %r9,5*8(%rdi)
15: movnti %r10,6*8(%rdi)
16: movnti %r11,7*8(%rdi)
leaq 64(%rsi),%rsi leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi leaq 64(%rdi),%rdi
jns .Lloop
.p2align 4
.Lhandle_tail:
movl %ecx,%edx
andl $63,%ecx
shrl $3,%ecx
jz .Lhandle_7
movl $8,%ebx
.p2align 4
.Lloop_8:
.Ls9: movq (%rsi),%r8
.Ld9: movnti %r8,(%rdi)
decl %ecx decl %ecx
leaq 8(%rdi),%rdi jnz 1b
17: movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
18: movq (%rsi),%r8
19: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi leaq 8(%rsi),%rsi
jnz .Lloop_8 leaq 8(%rdi),%rdi
decl %ecx
.Lhandle_7: jnz 18b
20: andl %edx,%edx
jz 23f
movl %edx,%ecx movl %edx,%ecx
andl $7,%ecx 21: movb (%rsi),%al
jz .Lende 22: movb %al,(%rdi)
.p2align 4
.Lloop_1:
.Ls10: movb (%rsi),%bl
.Ld10: movb %bl,(%rdi)
incq %rdi
incq %rsi incq %rsi
incq %rdi
decl %ecx decl %ecx
jnz .Lloop_1 jnz 21b
23: xorl %eax,%eax
CFI_REMEMBER_STATE
.Lende:
popq %rcx
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE %rcx
popq %rbx
CFI_ADJUST_CFA_OFFSET -8
CFI_RESTORE rbx
sfence sfence
ret ret
CFI_RESTORE_STATE
#ifdef FIX_ALIGNMENT .section .fixup,"ax"
/* align destination */ 30: shll $6,%ecx
.p2align 4 addl %ecx,%edx
.Lbad_alignment: jmp 60f
movl $8,%r9d 40: leal (%edx,%ecx,8),%edx
subl %ecx,%r9d jmp 60f
movl %r9d,%ecx 50: movl %ecx,%edx
cmpq %r9,%rdx 60: sfence
jz .Lhandle_7 movl %r8d,%ecx
js .Lhandle_7 jmp copy_user_handle_tail
.Lalign_1: .previous
.Ls11: movb (%rsi),%bl
.Ld11: movb %bl,(%rdi)
incq %rsi
incq %rdi
decl %ecx
jnz .Lalign_1
subq %r9,%rdx
jmp .Lafter_bad_alignment
#endif
/* table sorted by exception address */
.section __ex_table,"a" .section __ex_table,"a"
.align 8 .quad 1b,30b
.quad .Ls1,.Ls1e /* .Ls[1-4] - 0 bytes copied */ .quad 2b,30b
.quad .Ls2,.Ls1e .quad 3b,30b
.quad .Ls3,.Ls1e .quad 4b,30b
.quad .Ls4,.Ls1e .quad 5b,30b
.quad .Ld1,.Ls1e /* .Ld[1-4] - 0..24 bytes coped */ .quad 6b,30b
.quad .Ld2,.Ls2e .quad 7b,30b
.quad .Ld3,.Ls3e .quad 8b,30b
.quad .Ld4,.Ls4e .quad 9b,30b
.quad .Ls5,.Ls5e /* .Ls[5-8] - 32 bytes copied */ .quad 10b,30b
.quad .Ls6,.Ls5e .quad 11b,30b
.quad .Ls7,.Ls5e .quad 12b,30b
.quad .Ls8,.Ls5e .quad 13b,30b
.quad .Ld5,.Ls5e /* .Ld[5-8] - 32..56 bytes copied */ .quad 14b,30b
.quad .Ld6,.Ls6e .quad 15b,30b
.quad .Ld7,.Ls7e .quad 16b,30b
.quad .Ld8,.Ls8e .quad 18b,40b
.quad .Ls9,.Le_quad .quad 19b,40b
.quad .Ld9,.Le_quad .quad 21b,50b
.quad .Ls10,.Le_byte .quad 22b,50b
.quad .Ld10,.Le_byte
#ifdef FIX_ALIGNMENT
.quad .Ls11,.Lzero_rest
.quad .Ld11,.Lzero_rest
#endif
.quad .Le5,.Le_zero
.previous .previous
/* eax: zero, ebx: 64 */
.Ls1e: addl $8,%eax /* eax: bytes left uncopied: Ls1e: 64 .. Ls8e: 8 */
.Ls2e: addl $8,%eax
.Ls3e: addl $8,%eax
.Ls4e: addl $8,%eax
.Ls5e: addl $8,%eax
.Ls6e: addl $8,%eax
.Ls7e: addl $8,%eax
.Ls8e: addl $8,%eax
addq %rbx,%rdi /* +64 */
subq %rax,%rdi /* correct destination with computed offset */
shlq $6,%rdx /* loop counter * 64 (stride length) */
addq %rax,%rdx /* add offset to loopcnt */
andl $63,%ecx /* remaining bytes */
addq %rcx,%rdx /* add them */
jmp .Lzero_rest
/* exception on quad word loop in tail handling */
/* ecx: loopcnt/8, %edx: length, rdi: correct */
.Le_quad:
shll $3,%ecx
andl $7,%edx
addl %ecx,%edx
/* edx: bytes to zero, rdi: dest, eax:zero */
.Lzero_rest:
cmpl $0,(%rsp) /* zero flag set? */
jz .Le_zero
movq %rdx,%rcx
.Le_byte:
xorl %eax,%eax
.Le5: rep
stosb
/* when there is another exception while zeroing the rest just return */
.Le_zero:
movq %rdx,%rax
jmp .Lende
CFI_ENDPROC CFI_ENDPROC
ENDPROC(__copy_user_nocache) ENDPROC(__copy_user_nocache)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment