From 833ca409e17c10f4affb5879e22a03fdf1933439 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Wed, 8 Jul 2020 12:11:18 +0300
Subject: [PATCH 059/124] crypto: x86/chacha-sse3 - use unaligned loads for
 state array

commit e79a31715193686e92dadb4caedfbb1f5de3659c upstream.

Due to the fact that the x86 port does not support allocating objects
on the stack with an alignment that exceeds 8 bytes, we have a rather
ugly hack in the x86 code for ChaCha to ensure that the state array is
aligned to 16 bytes, allowing the SSE3 implementation of the algorithm
to use aligned loads.

Given that the performance benefit of using aligned loads appears to
be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and
the fact that this hack has leaked into generic ChaCha code, let's just
remove it.

Cc: Martin Willi <martin@strongswan.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Martin Willi <martin@strongswan.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++--------
 arch/x86/crypto/chacha_glue.c         | 17 ++---------------
 include/crypto/chacha.h               |  4 ----
 3 files changed, 10 insertions(+), 27 deletions(-)

--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ ENTRY(chacha_block_xor_ssse3)
 	FRAME_BEGIN

 	# x0..3 = s0..3
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
 	movdqa		%xmm0,%xmm8
 	movdqa		%xmm1,%xmm9
 	movdqa		%xmm2,%xmm10
@@ -205,10 +205,10 @@ ENTRY(hchacha_block_ssse3)
 	# %edx: nrounds
 	FRAME_BEGIN

-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3

 	mov		%edx,%r8d
 	call		chacha_permute
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -14,8 +14,6 @@
 #include <linux/module.h>
 #include <asm/simd.h>

-#define CHACHA_STATE_ALIGN 16
-
 asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
 				       unsigned int len, int nrounds);
 asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -125,8 +123,6 @@ static void chacha_dosimd(u32 *state, u8

 void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
 		hchacha_block_generic(state, stream, nrounds);
 	} else {
@@ -139,8 +135,6 @@ EXPORT_SYMBOL(hchacha_block_arch);

 void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, key, iv);
 }
 EXPORT_SYMBOL(chacha_init_arch);
@@ -148,8 +142,6 @@ EXPORT_SYMBOL(chacha_init_arch);
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
 		       int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
 	    bytes <= CHACHA_BLOCK_SIZE)
 		return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -171,15 +163,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
 static int chacha_simd_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct skcipher_walk walk;
 	int err;

 	err = skcipher_walk_virt(&walk, req, false);

-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, ctx->key, iv);

 	while (walk.nbytes > 0) {
@@ -218,12 +207,10 @@ static int xchacha_simd(struct skcipher_
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct chacha_ctx subctx;
 	u8 real_iv[16];

-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	chacha_init_generic(state, ctx->key, req->iv);

 	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -25,11 +25,7 @@
 #define CHACHA_BLOCK_SIZE	64
 #define CHACHAPOLY_IV_SIZE	12

-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS	((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
 #define CHACHA_STATE_WORDS	(CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif

 /* 192-bit nonce, then 64-bit stream position */
 #define XCHACHA_IV_SIZE	32