author	Fenghua Yu <fenghua.yu@intel.com>	2013-11-16 12:37:01 -0800
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-11-16 18:00:58 -0800
commit	f4cb1cc18f364d761d5614eb6293cccc6647f259
tree	af6be6a073d887171d49401ba9dddaed9a537a99
parent	1213959d4ad2f523290d0d7c94f712edef63852c
x86-64, copy_user: Remove zero byte check before copy user buffer.
The rep movsb instruction handles a zero-byte copy on its own. As Linus
pointed out, there is no need for a separate zero-size check in the kernel.
Removing this redundant check saves a few cycles in the copy user functions.
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1384634221-6006-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
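
The message's premise, that rep movsb with a zero count is itself a no-op, is
easy to check in isolation. A minimal user-space sketch (a hypothetical demo,
zero_movsb.S, not part of the patch; x86-64 Linux, GNU as):

# zero_movsb.S -- a minimal user-space sketch (hypothetical demo, not part
# of the patch) of the property the message relies on: "rep movsb" with a
# zero count in %rcx copies nothing and simply falls through.
# Build on x86-64 Linux: as zero_movsb.S -o zm.o && ld zm.o -o zm && ./zm
	.text
	.globl	_start
_start:
	lea	dst(%rip), %rdi		# destination buffer
	lea	src(%rip), %rsi		# source buffer ("hello\n")
	xorl	%ecx, %ecx		# count = 0
	rep movsb			# no-op: zero bytes are copied
	call	print_dst		# prints "xxxxx" -- dst untouched

	lea	dst(%rip), %rdi
	lea	src(%rip), %rsi
	movl	$6, %ecx		# count = 6
	rep movsb			# ordinary copy
	call	print_dst		# prints "hello" -- dst overwritten

	movl	$60, %eax		# exit(0)
	xorl	%edi, %edi
	syscall

print_dst:				# write(1, dst, 6)
	movl	$1, %eax
	movl	$1, %edi
	lea	dst(%rip), %rsi
	movl	$6, %edx
	syscall
	ret

	.data
src:	.ascii	"hello\n"
dst:	.ascii	"xxxxx\n"

Both calls print six bytes of dst: the first still shows the 'x' fill because
the zero-count rep movsb never stores, the second shows "hello" after the
real copy.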
 arch/x86/lib/copy_user_64.S | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a30ca15be21c..ffe4eb9f09eb 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 4f
 	cmpl $8,%edx
 	jb 2f	/* less than 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -249,7 +247,7 @@ ENTRY(copy_user_generic_string)
 2:	movl %edx,%ecx
 3:	rep
 	movsb
-4:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 2f
 	movl %edx,%ecx
 1:	rep
 	movsb
-2:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
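
Applying the second and third hunks, the post-patch body of
copy_user_enhanced_fast_string reduces to a straight rep movsb with no flow
control before the copy; the zero-count case now falls through the string
instruction itself:

ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

The 1: label remains because the exception-table fixup for faults during the
user copy still refers to it.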