x86/hweight: Force inlining of __arch_hweight{32,64}()
author     Denys Vlasenko <dvlasenk@redhat.com>
           Tue, 4 Aug 2015 14:15:15 +0000 (16:15 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Wed, 5 Aug 2015 07:38:09 +0000 (09:38 +0200)
With this config:

  http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os

gcc-4.7.2 generates many copies of these tiny functions:

__arch_hweight32 (35 copies):
55                      push   %rbp
e8 66 9b 4a 00          callq  __sw_hweight32
48 89 e5                mov    %rsp,%rbp
5d                      pop    %rbp
c3                      retq

__arch_hweight64 (8 copies):
55                      push   %rbp
e8 5e c2 8a 00          callq  __sw_hweight64
48 89 e5                mov    %rsp,%rbp
5d                      pop    %rbp
c3                      retq

See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

This patch fixes this via s/inline/__always_inline/.
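
Each listed copy above is nothing but frame setup around the real
call, so forcing the inline strictly removes code. For reference, a
sketch of the attribute mechanics from the kernel's compiler headers
of this era (include/linux/compiler-gcc.h); with the config above,
CONFIG_OPTIMIZE_INLINING=y demotes plain "inline" to a hint:

  /*
   * Plain "inline" is a hint, which gcc-4.7.2 at -Os declines;
   * __always_inline forces inlining via gcc's always_inline attribute.
   */
  #define __always_inline inline __attribute__((always_inline))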

To avoid touching the 32-bit case, where such a change was not
tested to be a win, reformat __arch_hweight64() to have completely
disjoint 64-bit and 32-bit implementations. IOW: make the #ifdef /
#else / #endif blocks contain complete function definitions for
32 bits and 64 bits, instead of having #ifdef / #else / #endif
inside a single function body. Only the 64-bit __arch_hweight64()
is __always_inline'd.
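
For clarity, the resulting __arch_hweight64() definitions after this
change (reconstructed from the diff below):

  #ifdef CONFIG_X86_32
  /* 32 bits: kept as a plain inline hint, two 32-bit popcounts */
  static inline unsigned long __arch_hweight64(__u64 w)
  {
          return  __arch_hweight32((u32)w) +
                  __arch_hweight32((u32)(w >> 32));
  }
  #else
  /* 64 bits: force-inlined POPCNT64/__sw_hweight64 alternative */
  static __always_inline unsigned long __arch_hweight64(__u64 w)
  {
          unsigned long res = 0;

          asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
                       : "="REG_OUT (res)
                       : REG_IN (w));

          return res;
  }
  #endif /* CONFIG_X86_32 */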

    text     data      bss       dec  filename
86971120 17195912 36659200 140826232  vmlinux.before
86970954 17195912 36659200 140826066  vmlinux
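
The net effect is a text saving of 86971120 - 86970954 = 166 bytes:
the 43 out-of-line copies go away, while each call site still holds
a single 5-byte call/POPCNT alternative, roughly as before.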

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1438697716-28121-2-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 9686c3d..259a7c1 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -21,7 +21,7 @@
  * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
  * compiler switches.
  */
-static inline unsigned int __arch_hweight32(unsigned int w)
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
 {
        unsigned int res = 0;
 
@@ -42,20 +42,23 @@ static inline unsigned int __arch_hweight8(unsigned int w)
        return __arch_hweight32(w & 0xff);
 }
 
+#ifdef CONFIG_X86_32
 static inline unsigned long __arch_hweight64(__u64 w)
 {
-       unsigned long res = 0;
-
-#ifdef CONFIG_X86_32
        return  __arch_hweight32((u32)w) +
                __arch_hweight32((u32)(w >> 32));
+}
 #else
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+       unsigned long res = 0;
+
        asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
                     : "="REG_OUT (res)
                     : REG_IN (w));
-#endif /* CONFIG_X86_32 */
 
        return res;
 }
+#endif /* CONFIG_X86_32 */
 
 #endif