Commit 5015b494 authored by Stephen Rothwell

powerpc: fix __strnlen_user in merge tree

Change USER/KERNEL_DS so that the merged version of
__strnlen_user can be used, which allows us to complete the
removal of arch/ppc64/lib/.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
parent 2df5e8bc
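
The key to the fix is the new meaning of the segment limit: fs.seg now holds the highest address the current segment may touch, on both word sizes, so strnlen_user() can hand it straight to the merged three-argument __strnlen_user() as its "top" bound. Previously ppc64 used KERNEL_DS = 0 and USER_DS = 0xf000000000000000UL with a mask test, which provided no usable upper address. A minimal stand-alone sketch of the new scheme (TASK_SIZE_EXAMPLE is a made-up stand-in for TASK_SIZE / TASK_SIZE_USER64):

	/* Sketch only -- mirrors the KERNEL_DS/USER_DS definitions below. */
	typedef struct { unsigned long seg; } mm_segment_t;

	#define TASK_SIZE_EXAMPLE	0x0000400000000000UL	/* stand-in value */
	#define MAKE_MM_SEG(s)		((mm_segment_t) { (s) })
	#define KERNEL_DS		MAKE_MM_SEG(~0UL)	/* may touch everything */
	#define USER_DS			MAKE_MM_SEG(TASK_SIZE_EXAMPLE - 1) /* highest user address */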
arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@ endif
 obj-y			+= strcase.o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
 obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
-			   memcpy_64.o usercopy_64.o mem_64.o
+			   memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o
 obj-$(CONFIG_XMON)	+= sstep.o

arch/ppc64/Makefile
@@ -86,7 +86,6 @@ head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/fpu.o
 head-y += arch/powerpc/kernel/entry_64.o
-libs-y += arch/ppc64/lib/
 core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
 core-y += arch/powerpc/mm/
 core-y += arch/powerpc/sysdev/

arch/ppc64/lib/Makefile (deleted)
#
# Makefile for ppc64-specific library files..
#
lib-y := string.o

arch/ppc64/lib/string.S (deleted)
/*
 * String handling functions for PowerPC.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

_GLOBAL(strcpy)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

_GLOBAL(strncpy)
	cmpwi	0,r5,0
	beqlr
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	blr

_GLOBAL(strcat)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r0,1(r5)
	cmpwi	0,r0,0
	bne	1b
	addi	r5,r5,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r5)
	bne	1b
	blr

_GLOBAL(strcmp)
	addi	r5,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r5)
	cmpwi	1,r3,0
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	beqlr	1
	beq	1b
	blr

_GLOBAL(strlen)
	addi	r4,r3,-1
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bne	1b
	subf	r3,r3,r4
	blr

_GLOBAL(memcmp)
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r6,r3,-1
	addi	r4,r4,-1
1:	lbzu	r3,1(r6)
	lbzu	r0,1(r4)
	subf.	r3,r0,r3
	bdnzt	2,1b
	blr
2:	li	r3,0
	blr

_GLOBAL(memchr)
	cmpwi	0,r5,0
	ble-	2f
	mtctr	r5
	addi	r3,r3,-1
1:	lbzu	r0,1(r3)
	cmpw	0,r0,r4
	bdnzf	2,1b
	beqlr
2:	li	r3,0
	blr

_GLOBAL(__clear_user)
	addi	r6,r3,-4
	li	r3,0
	li	r5,0
	cmplwi	0,r4,4
	blt	7f
	/* clear a single word */
11:	stwu	r5,4(r6)
	beqlr
	/* clear word sized chunks */
	andi.	r0,r6,3
	add	r4,r0,r4
	subf	r6,r0,r6
	srwi	r0,r4,2
	andi.	r4,r4,3
	mtctr	r0
	bdz	7f
1:	stwu	r5,4(r6)
	bdnz	1b
	/* clear byte sized chunks */
7:	cmpwi	0,r4,0
	beqlr
	mtctr	r4
	addi	r6,r6,3
8:	stbu	r5,1(r6)
	bdnz	8b
	blr
90:	mr	r3,r4
	blr
91:	mfctr	r3
	slwi	r3,r3,2
	add	r3,r3,r4
	blr
92:	mfctr	r3
	blr

	.section __ex_table,"a"
	.align	3
	.llong	11b,90b
	.llong	1b,91b
	.llong	8b,92b
	.text
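
/*
 * Note (sketch, not part of the original file): each .llong pair in
 * the __ex_table section above says "if the instruction at the first
 * address faults, resume at the second".  The fixups at 90:, 91: and
 * 92: compute how many bytes were left unwritten so __clear_user()
 * can return that count.  In C, the pairs populate entries shaped like:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	(address of the instruction that may fault)
 *		unsigned long fixup;	(address of the recovery code)
 *	};
 */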

/* r3 = dst, r4 = src, r5 = count */
_GLOBAL(__strncpy_from_user)
	addi	r6,r3,-1
	addi	r4,r4,-1
	cmpwi	0,r5,0
	beq	2f
	mtctr	r5
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	stbu	r0,1(r6)
	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
	beq	3f
2:	addi	r6,r6,1
3:	subf	r3,r3,r6
	blr
99:	li	r3,-EFAULT
	blr

	.section __ex_table,"a"
	.align	3
	.llong	1b,99b
	.text

/* r3 = str, r4 = len (> 0) */
_GLOBAL(__strnlen_user)
	addi	r7,r3,-1
	mtctr	r4		/* ctr = len */
1:	lbzu	r0,1(r7)	/* get next byte */
	cmpwi	0,r0,0
	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
	addi	r7,r7,1
	subf	r3,r3,r7	/* number of bytes we have looked at */
	beqlr			/* return if we found a 0 byte */
	cmpw	0,r3,r4		/* did we look at all len bytes? */
	blt	99f		/* if not, must have hit top */
	addi	r3,r4,1		/* return len + 1 to indicate no null found */
	blr
99:	li	r3,0		/* bad address, return 0 */
	blr

	.section __ex_table,"a"
	.align	3
	.llong	1b,99b
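
For reference, a rough C rendering of the two-argument __strnlen_user() deleted above (the fault path supplied by the exception table, which returns 0, is omitted; the merged replacement in arch/powerpc/lib/string.S takes an extra "top" argument and additionally stops the scan at that address):

	/* Sketch of the asm above, not kernel code. */
	long strnlen_sketch(const char *str, long len)
	{
		long n;

		for (n = 0; n < len; n++)
			if (str[n] == '\0')
				return n + 1;	/* length including the NUL */
		return len + 1;			/* no NUL within len bytes */
	}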

include/asm-powerpc/uaccess.h
@@ -24,11 +24,11 @@
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
-#define KERNEL_DS	MAKE_MM_SEG(0UL)
-#define USER_DS	MAKE_MM_SEG(0xf000000000000000UL)
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS	MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
 #else
-#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
 #endif

@@ -40,22 +40,11 @@
 #ifdef __powerpc64__
 /*
- * Use the alpha trick for checking ranges:
- *
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- *  - "addr" doesn't have any high-bits set
- *  - AND "size" doesn't have any high-bits set
- *  - OR we are in kernel mode.
- *
- * We dont have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
 */
 #define __access_ok(addr, size, segment)	\
-	(((segment).seg & (addr | size )) == 0)
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

 #else
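
The replaced "alpha trick" relied on valid user addresses and sizes having no high bits set; the new form is two plain inclusive comparisons. Even in the worst case addr == seg and size == seg, addr + size is at most 2 * seg, and the "large enough gap" the new comment mentions keeps that below the kernel region. A stand-alone sketch of the predicate:

	/* Sketch of the new check, outside the kernel headers. */
	static int access_ok_sketch(unsigned long addr, unsigned long size,
				    unsigned long seg)
	{
		return addr <= seg && size <= seg;
	}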

@@ -161,7 +150,10 @@ extern long __put_user_bad(void);
 	: "=r" (err)						\
 	: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

-#ifndef __powerpc64__
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval)				\
+	__put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
 #define __put_user_asm2(x, addr, err)				\
 	__asm__ __volatile__(					\
 		"1:	stw %1,0(%2)\n"				\

@@ -178,9 +170,6 @@ extern long __put_user_bad(void);
 		".previous"					\
 		: "=r" (err)					\
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, ptr, retval)				\
-	__put_user_asm(x, ptr, retval, "std")
 #endif /* __powerpc64__ */

 #define __put_user_size(x, ptr, size, retval)			\

@@ -218,7 +207,7 @@ extern long __get_user_bad(void);
 #define __get_user_asm(x, addr, err, op)		\
 	__asm__ __volatile__(				\
 		"1:	"op" %1,0(%2)	# get_user\n"	\
 		"2:\n"					\
 		".section .fixup,\"ax\"\n"		\
 		"3:	li %0,%3\n"			\

@@ -232,8 +221,11 @@ extern long __get_user_bad(void);
 	: "=r" (err), "=r" (x)				\
 	: "b" (addr), "i" (-EFAULT), "0" (err))

-#ifndef __powerpc64__
+#ifdef __powerpc64__
 #define __get_user_asm2(x, addr, err)			\
+	__get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err)			\
 	__asm__ __volatile__(				\
 		"1:	lwz %1,0(%2)\n"			\
 		"2:	lwz %1+1,4(%2)\n"		\

@@ -251,17 +243,14 @@ extern long __get_user_bad(void);
 		".previous"				\
 		: "=r" (err), "=&r" (x)			\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
-#else
-#define __get_user_asm2(x, addr, err)			\
-	__get_user_asm(x, addr, err, "ld")
 #endif /* __powerpc64__ */

 #define __get_user_size(x, ptr, size, retval)		\
 do {							\
 	retval = 0;					\
 	__chk_user_ptr(ptr);				\
 	if (size > sizeof(x))				\
 		(x) = __get_user_bad();			\
 	switch (size) {					\
 	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
 	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\

@@ -300,7 +289,7 @@ do { \
 	long __gu_err = -EFAULT;			\
 	unsigned long __gu_val = 0;			\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	might_sleep();					\
 	if (access_ok(VERIFY_READ, __gu_addr, (size)))	\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;		\
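
These macros back the usual get_user()/put_user() accessors. A hedged usage sketch (kernel context assumed; a 64-bit value exercises the __get_user_asm2/__put_user_asm2 paths on 32-bit):

	/* Sketch: set the low bit of a u64 owned by user space. */
	static int set_low_bit_sketch(u64 __user *uptr)
	{
		u64 val;

		if (get_user(val, uptr))	/* non-zero (-EFAULT) on a bad pointer */
			return -EFAULT;
		val |= 1;
		return put_user(val, uptr);	/* 0 on success */
	}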

@@ -313,8 +302,9 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);

 #ifndef __powerpc64__
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
+extern inline unsigned long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
 {
 	unsigned long over;

@@ -328,8 +318,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }

-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+extern inline unsigned long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
 {
 	unsigned long over;

@@ -343,10 +333,23 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }

+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
 #else /* __powerpc64__ */
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+
+#define __copy_in_user(to, from, size)	\
+	__copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+		unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+		unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+		unsigned long n);
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+		const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
 		unsigned long ret;

@@ -370,8 +373,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_tofrom_user((__force void __user *) to, from, n);
 }

-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+		const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
 		unsigned long ret;

@@ -397,8 +400,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 #endif /* __powerpc64__ */

-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long size)
+static inline unsigned long __copy_from_user(void *to,
+		const void __user *from, unsigned long size)
 {
 	might_sleep();
 #ifndef __powerpc64__

@@ -408,8 +411,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long size)
 #endif /* __powerpc64__ */
 }

-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long size)
+static inline unsigned long __copy_to_user(void __user *to,
+		const void *from, unsigned long size)
 {
 	might_sleep();
 #ifndef __powerpc64__

@@ -419,21 +422,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long size)
 #endif /* __powerpc64__ */
 }

-#ifndef __powerpc64__
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
-	__copy_tofrom_user((to), (from), (size))
-extern unsigned long copy_from_user(void *to, const void __user *from,
-		unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-		unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-		unsigned long n);
-#endif /* __powerpc64__ */
-
 extern unsigned long __clear_user(void __user *addr, unsigned long size);

 static inline unsigned long clear_user(void __user *addr, unsigned long size)
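
Both copy_from_user() and copy_to_user() return the number of bytes left uncopied, so zero means complete success; the 32-bit inlines above additionally clip the copy when the range straddles the segment limit. A hedged usage sketch:

	/* Sketch: bounce n bytes through a kernel buffer. */
	static long bounce_sketch(void __user *dst, const void __user *src,
				  unsigned long n)
	{
		char buf[64];

		if (n > sizeof(buf))
			return -EINVAL;
		if (copy_from_user(buf, src, n))
			return -EFAULT;		/* treat partial copies as failure */
		if (copy_to_user(dst, buf, n))
			return -EFAULT;
		return n;
	}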

@@ -466,11 +454,7 @@ static inline long strncpy_from_user(char *dst, const char __user *src,
  *
  * Return 0 for error
  */
-#ifndef __powerpc64__
 extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-#else /* __powerpc64__ */
-extern int __strnlen_user(const char __user *str, long len);
-#endif /* __powerpc64__ */

 /*
  * Returns the length of the string at str (including the null byte),

@@ -482,18 +466,11 @@ extern int __strnlen_user(const char __user *str, long len);
  */
 static inline int strnlen_user(const char __user *str, long len)
 {
-#ifndef __powerpc64__
 	unsigned long top = current->thread.fs.seg;

 	if ((unsigned long)str > top)
 		return 0;
 	return __strnlen_user(str, len, top);
-#else /* __powerpc64__ */
-	might_sleep();
-	if (likely(access_ok(VERIFY_READ, str, 1)))
-		return __strnlen_user(str, len);
-	return 0;
-#endif /* __powerpc64__ */
 }

 #define strlen_user(str) strnlen_user((str), 0x7ffffffe)
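
With the #ifdef gone, both word sizes share one convention: strnlen_user() returns the string length including the terminating NUL, len + 1 if no NUL appears within len bytes, and 0 for a bad pointer. A hedged usage sketch:

	/* Sketch: validate a user-supplied name of at most 63 characters. */
	static long name_len_sketch(const char __user *uname)
	{
		long n = strnlen_user(uname, 64);

		if (n == 0)
			return -EFAULT;		/* pointer outside the segment */
		if (n > 64)
			return -ENAMETOOLONG;	/* no NUL within the limit */
		return n - 1;			/* length without the NUL */
	}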