Crypto++ 8.4
Free C++ class library of cryptographic schemes
gcm_simd.cpp
1 // gcm_simd.cpp - written and placed in the public domain by
2 // Jeffrey Walton, Uri Blumenthal and Marcel Raad.
3 // Original x86 CLMUL by Wei Dai. ARM and POWER8
4 // PMULL and VMULL by JW, UB and MR.
5 //
6 // This source file uses intrinsics to gain access to CLMUL, ARMv8a
7 // PMULL and Power8 VMULL instructions. A separate source file
8 // is needed because additional CXXFLAGS are required to enable
9 // the appropriate instruction sets in some build configurations.
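//
// For example, GCC and Clang builds typically pass -mssse3 -mpclmul for the
// CLMUL code paths below and -march=armv8-a+crypto for the ARMv8 PMULL code
// paths; the exact flags are set by the library's makefiles and vary by
// toolchain and platform.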
10 
11 #include "pch.h"
12 #include "config.h"
13 #include "misc.h"
14 
15 #if defined(CRYPTOPP_DISABLE_GCM_ASM)
16 # undef CRYPTOPP_X86_ASM_AVAILABLE
17 # undef CRYPTOPP_X32_ASM_AVAILABLE
18 # undef CRYPTOPP_X64_ASM_AVAILABLE
19 # undef CRYPTOPP_SSE2_ASM_AVAILABLE
20 #endif
21 
22 #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
23 # include <emmintrin.h>
24 # include <xmmintrin.h>
25 #endif
26 
27 #if (CRYPTOPP_CLMUL_AVAILABLE)
28 # include <tmmintrin.h>
29 # include <wmmintrin.h>
30 #endif
31 
32 #if (CRYPTOPP_ARM_NEON_HEADER)
33 # include <arm_neon.h>
34 #endif
35 
36 #if (CRYPTOPP_ARM_ACLE_HEADER)
37 # include <stdint.h>
38 # include <arm_acle.h>
39 #endif
40 
41 #if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
42 # include "arm_simd.h"
43 #endif
44 
45 #if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
46 # include "ppc_simd.h"
47 #endif
48 
49 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
50 # include <signal.h>
51 # include <setjmp.h>
52 #endif
53 
54 #ifndef EXCEPTION_EXECUTE_HANDLER
55 # define EXCEPTION_EXECUTE_HANDLER 1
56 #endif
57 
58 // Clang intrinsic casts, http://bugs.llvm.org/show_bug.cgi?id=20670
59 #define M128_CAST(x) ((__m128i *)(void *)(x))
60 #define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
61 
62 // Squash MS LNK4221 and libtool warnings
63 extern const char GCM_SIMD_FNAME[] = __FILE__;
64 
65 NAMESPACE_BEGIN(CryptoPP)
66 
67 // ************************* Feature Probes ************************* //
68 
69 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
70 extern "C" {
71  typedef void (*SigHandler)(int);
72 
73  static jmp_buf s_jmpSIGILL;
74  static void SigIllHandler(int)
75  {
76  longjmp(s_jmpSIGILL, 1);
77  }
78 }
79 #endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
80 
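// The probes below execute a handful of carry-less multiply instructions and
// compare the results against known answers. On MS-style toolchains the
// attempt is wrapped in SEH (__try/__except); elsewhere a SIGILL handler plus
// setjmp/longjmp unwinds the attempt if the instruction is not implemented.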
81 #if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
82 bool CPU_ProbePMULL()
83 {
84 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
85  return false;
86 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
87 # if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
88  volatile bool result = true;
89  __try
90  {
91  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
92  const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
93  const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);
94 
95  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
96  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
97  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
98  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
99  const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);
100 
101  const uint64x2_t r1 = PMULL_00(a1, b1);
102  const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
103  vreinterpretq_u64_u8(b2));
104 
105  result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
106  vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
107  vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
108  vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
109  }
110  __except (EXCEPTION_EXECUTE_HANDLER)
111  {
112  return false;
113  }
114  return result;
115 # else
116 
117  // longjmp and clobber warnings. Volatile is required.
118  volatile bool result = true;
119 
120  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
121  if (oldHandler == SIG_ERR)
122  return false;
123 
124  volatile sigset_t oldMask;
125  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
126  {
127  signal(SIGILL, oldHandler);
128  return false;
129  }
130 
131  if (setjmp(s_jmpSIGILL))
132  result = false;
133  else
134  {
135  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
136  const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
137  const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);
138 
139  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
140  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
141  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
142  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
143  const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);
144 
145  const uint64x2_t r1 = PMULL_00(a1, b1);
146  const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
147  vreinterpretq_u64_u8(b2));
148 
149  result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
150  vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
151  vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
152  vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
153  }
154 
155  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
156  signal(SIGILL, oldHandler);
157  return result;
158 # endif
159 #else
160  return false;
161 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
162 }
163 #endif // ARM32 or ARM64
164 
165 #if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
166 bool CPU_ProbePMULL()
167 {
168 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
169  return false;
170 #elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
171  // longjmp and clobber warnings. Volatile is required.
172  volatile bool result = true;
173 
174  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
175  if (oldHandler == SIG_ERR)
176  return false;
177 
178  volatile sigset_t oldMask;
179  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
180  {
181  signal(SIGILL, oldHandler);
182  return false;
183  }
184 
185  if (setjmp(s_jmpSIGILL))
186  result = false;
187  else
188  {
189  const uint64_t wa1[]={0,W64LIT(0x9090909090909090)},
190  wb1[]={0,W64LIT(0xb0b0b0b0b0b0b0b0)};
191  const uint64x2_p a1=VecLoad(wa1), b1=VecLoad(wb1);
192 
193  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
194  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
195  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
196  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
197  const uint32x4_p a2=VecLoad(wa2), b2=VecLoad(wb2);
198 
199  const uint64x2_p r1 = VecIntelMultiply11(a1, b1);
200  const uint64x2_p r2 = VecIntelMultiply11((uint64x2_p)a2, (uint64x2_p)b2);
201 
202  const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
203  wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};
204  const uint64x2_p c1=VecLoad(wc1), c2=VecLoad(wc2);
205 
206  result = !!(VecEqual(r1, c1) && VecEqual(r2, c2));
207  }
208 
209  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
210  signal(SIGILL, oldHandler);
211  return result;
212 #else
213  return false;
214 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
215 }
216 #endif // PPC32 or PPC64
217 
218 // *************************** ARM NEON *************************** //
219 
220 #if CRYPTOPP_ARM_NEON_AVAILABLE
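// XOR two 16-byte blocks: a = b ^ c.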
221 void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
222 {
223  vst1q_u8(a, veorq_u8(vld1q_u8(b), vld1q_u8(c)));
224 }
225 #endif // CRYPTOPP_ARM_NEON_AVAILABLE
226 
227 #if CRYPTOPP_ARM_PMULL_AVAILABLE
228 
229 // Swaps high and low 64-bit words
230 inline uint64x2_t SwapWords(const uint64x2_t& data)
231 {
232  return (uint64x2_t)vcombine_u64(
233  vget_high_u64(data), vget_low_u64(data));
234 }
235 
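// GCM_Reduce_PMULL folds the 256-bit carry-less product c0*x^128 + c1*x^64 + c2
// back into 128 bits modulo the GCM polynomial. The steps mirror
// GCM_Reduce_CLMUL below, which carries a line-by-line comment.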
236 uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
237 {
238  c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
239  c1 = veorq_u64(c1, PMULL_01(c0, r));
240  c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
241  c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
242  c0 = PMULL_00(c0, r);
243  c2 = veorq_u64(c2, c0);
244  c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
245  c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
246  c2 = vshlq_n_u64(c2, 1);
247 
248  return veorq_u64(c2, c1);
249 }
250 
251 uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
252 {
253  const uint64x2_t c0 = PMULL_00(x, h);
254  const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
255  const uint64x2_t c2 = PMULL_11(x, h);
256 
257  return GCM_Reduce_PMULL(c0, c1, c2, r);
258 }
259 
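// Precompute successive powers of the hash key H. Each 32-byte table entry
// packs two consecutive powers, with their low 64-bit halves stored together
// followed by their high halves, matching the paired 16-byte loads in
// GCM_AuthenticateBlocks_PMULL.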
260 void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
261 {
262  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
263  const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
264  const uint64x2_t h0 = vextq_u64(t, t, 1);
265 
266  uint64x2_t h = h0;
267  unsigned int i;
268  for (i=0; i<tableSize-32; i+=32)
269  {
270  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
271  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
272  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
273  vst1q_u64((uint64_t *)(mulTable+i+8), h);
274  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
275  h = GCM_Multiply_PMULL(h1, h0, r);
276  }
277 
278  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
279  vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
280  vst1q_u64((uint64_t *)(mulTable+i+16), h1);
281  vst1q_u64((uint64_t *)(mulTable+i+8), h);
282  vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
283 }
284 
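// Hash up to eight 16-byte blocks per pass. Carry-less partial products are
// accumulated in c0/c1/c2 against the precomputed key powers in mtable, and a
// single GCM_Reduce_PMULL per group folds them into the running hash x. The
// return value is the number of unprocessed trailing bytes (less than 16).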
285 size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
286 {
287  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
288  uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));
289 
290  while (len >= 16)
291  {
292  size_t i=0, s = UnsignedMin(len/16U, 8U);
293  uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
294  uint64x2_t c0 = vdupq_n_u64(0);
295  uint64x2_t c1 = vdupq_n_u64(0);
296  uint64x2_t c2 = vdupq_n_u64(0);
297 
298  while (true)
299  {
300  const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
301  const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
302  const uint64x2_t h2 = veorq_u64(h0, h1);
303 
304  if (++i == s)
305  {
306  const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
307  d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
308  c0 = veorq_u64(c0, PMULL_00(d1, h0));
309  c2 = veorq_u64(c2, PMULL_10(d1, h1));
310  d1 = veorq_u64(d1, SwapWords(d1));
311  c1 = veorq_u64(c1, PMULL_00(d1, h2));
312 
313  break;
314  }
315 
316  d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
317  c0 = veorq_u64(c0, PMULL_10(d2, h0));
318  c2 = veorq_u64(c2, PMULL_10(d1, h1));
319  d2 = veorq_u64(d2, d1);
320  c1 = veorq_u64(c1, PMULL_10(d2, h2));
321 
322  if (++i == s)
323  {
324  const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
325  d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
326  c0 = veorq_u64(c0, PMULL_01(d1, h0));
327  c2 = veorq_u64(c2, PMULL_11(d1, h1));
328  d1 = veorq_u64(d1, SwapWords(d1));
329  c1 = veorq_u64(c1, PMULL_01(d1, h2));
330 
331  break;
332  }
333 
334  const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
335  d2 = vextq_u64(t3, t3, 1);
336  c0 = veorq_u64(c0, PMULL_01(d1, h0));
337  c2 = veorq_u64(c2, PMULL_01(d2, h1));
338  d1 = veorq_u64(d1, d2);
339  c1 = veorq_u64(c1, PMULL_01(d1, h2));
340  }
341  data += s*16;
342  len -= s*16;
343 
344  c1 = veorq_u64(veorq_u64(c1, c0), c2);
345  x = GCM_Reduce_PMULL(c0, c1, c2, r);
346  }
347 
348  vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
349  return len;
350 }
351 
352 void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
353 {
354  if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
355  {
356  const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
357  vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
358  }
359 }
360 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
361 
362 // ***************************** SSE ***************************** //
363 
364 #if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
365 // SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
366 // a source file with a SSE architecture switch. Also see GH #226 and GH #284.
367 void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
368 {
369 # if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
370  asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
371  : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
372 # else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
373  _mm_store_si128(M128_CAST(a), _mm_xor_si128(
374  _mm_load_si128(CONST_M128_CAST(b)),
375  _mm_load_si128(CONST_M128_CAST(c))));
376 # endif
377 }
378 #endif // CRYPTOPP_SSE2_ASM_AVAILABLE
379 
380 #if CRYPTOPP_CLMUL_AVAILABLE
381 
382 #if 0
383 // preserved for testing
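// gcm_gf_mult is a bit-serial reference implementation of multiplication in
// GF(2^128) as used by GHASH, and the _mm_clmulepi64_si128 emulation below
// reproduces the CLMUL intrinsic with PolynomialMod2. Both are kept only for
// cross-checking the intrinsic code paths.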
384 void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
385 {
386  word64 Z0=0, Z1=0, V0, V1;
387 
388  typedef BlockGetAndPut<word64, BigEndian> Block;
389  Block::Get(a)(V0)(V1);
390 
391  for (int i=0; i<16; i++)
392  {
393  for (int j=0x80; j!=0; j>>=1)
394  {
395  int x = b[i] & j;
396  Z0 ^= x ? V0 : 0;
397  Z1 ^= x ? V1 : 0;
398  x = (int)V1 & 1;
399  V1 = (V1>>1) | (V0<<63);
400  V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
401  }
402  }
403  Block::Put(NULLPTR, c)(Z0)(Z1);
404 }
405 
406 __m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
407 {
408  word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
409  word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};
410 
411  PolynomialMod2 pa((byte *)A, 8);
412  PolynomialMod2 pb((byte *)B, 8);
413  PolynomialMod2 c = pa*pb;
414 
415  __m128i output;
416  for (int i=0; i<16; i++)
417  ((byte *)&output)[i] = c.GetByte(i);
418  return output;
419 }
420 #endif // Testing
421 
422 // Swaps high and low 64-bit words
423 inline __m128i SwapWords(const __m128i& val)
424 {
425  return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
426 }
427 
428 // SunCC 5.11-5.15 compiler crash. Make the function inline
429 // and parameters non-const. Also see GH #188 and GH #224.
430 inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
431 {
432  /*
433  The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
434  significant half of c0 as a polynomial, which, due to GCM's bit reflection, is in the
435  rightmost bit positions, and the lowest byte addresses.
436 
437  c1 ^= c0t * 0xc200000000000000
438  c2t ^= c0t
439  t = shift (c1t ^ c0b) left 1 bit
440  c2 ^= t * 0xe100000000000000
441  c2t ^= c1b
442  shift c2 left 1 bit and xor in lowest bit of c1t
443  */
444  c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
445  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
446  c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
447  c0 = _mm_slli_epi64(c0, 1);
448  c0 = _mm_clmulepi64_si128(c0, r, 0);
449  c2 = _mm_xor_si128(c2, c0);
450  c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
451  c1 = _mm_unpacklo_epi64(c1, c2);
452  c1 = _mm_srli_epi64(c1, 63);
453  c2 = _mm_slli_epi64(c2, 1);
454  return _mm_xor_si128(c2, c1);
455 }
456 
457 // SunCC 5.13-5.14 compiler crash. Don't make the function inline.
458 // This is in contrast to GCM_Reduce_CLMUL, which must be inline.
459 __m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
460 {
461  const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
462  const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
463  const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);
464 
465  return GCM_Reduce_CLMUL(c0, c1, c2, r);
466 }
467 
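// Same key table layout as GCM_SetKeyWithoutResync_PMULL above: each 32-byte
// entry holds two consecutive powers of H, low halves first, then high halves.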
468 void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
469 {
470  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
471  const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
472  __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;
473 
474  unsigned int i;
475  for (i=0; i<tableSize-32; i+=32)
476  {
477  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
478  _mm_storel_epi64(M128_CAST(mulTable+i), h);
479  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
480  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
481  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
482  h = GCM_Multiply_CLMUL(h1, h0, r);
483  }
484 
485  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
486  _mm_storel_epi64(M128_CAST(mulTable+i), h);
487  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
488  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
489  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
490 }
491 
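// Same structure as GCM_AuthenticateBlocks_PMULL above. The shuffle masks m1
// and m2 byte-reverse each input block (fully, or per 64-bit half) into the
// reflected representation GCM uses before the carry-less multiplies.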
492 size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
493 {
494  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
495  const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
496  const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
497  __m128i x = _mm_load_si128(M128_CAST(hbuffer));
498 
499  while (len >= 16)
500  {
501  size_t i=0, s = UnsignedMin(len/16, 8U);
502  __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
503  __m128i d2 = _mm_shuffle_epi8(d1, m2);
504  __m128i c0 = _mm_setzero_si128();
505  __m128i c1 = _mm_setzero_si128();
506  __m128i c2 = _mm_setzero_si128();
507 
508  while (true)
509  {
510  const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
511  const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
512  const __m128i h2 = _mm_xor_si128(h0, h1);
513 
514  if (++i == s)
515  {
516  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
517  d1 = _mm_xor_si128(d1, x);
518  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
519  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
520  d1 = _mm_xor_si128(d1, SwapWords(d1));
521  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
522  break;
523  }
524 
525  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
526  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
527  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
528  d2 = _mm_xor_si128(d2, d1);
529  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));
530 
531  if (++i == s)
532  {
533  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
534  d1 = _mm_xor_si128(d1, x);
535  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
536  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
537  d1 = _mm_xor_si128(d1, SwapWords(d1));
538  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
539  break;
540  }
541 
542  d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
543  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
544  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
545  d1 = _mm_xor_si128(d1, d2);
546  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
547  }
548  data += s*16;
549  len -= s*16;
550 
551  c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
552  x = GCM_Reduce_CLMUL(c0, c1, c2, r);
553  }
554 
555  _mm_store_si128(M128_CAST(hbuffer), x);
556  return len;
557 }
558 
559 void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
560 {
561  // SSSE3 instruction, but only used with CLMUL
562  const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
563  _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
564  _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
565 }
566 #endif // CRYPTOPP_CLMUL_AVAILABLE
567 
568 // ***************************** POWER8 ***************************** //
569 
570 #if CRYPTOPP_POWER8_AVAILABLE
571 void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
572 {
573  VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
574 }
575 #endif // CRYPTOPP_POWER8_AVAILABLE
576 
577 #if CRYPTOPP_POWER8_VMULL_AVAILABLE
578 
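// GCM_Reduce_VMULL and GCM_Multiply_VMULL are the POWER8 counterparts of the
// CLMUL routines above; see the comment in GCM_Reduce_CLMUL for the reduction
// algorithm.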
579 uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
580 {
581  const uint64x2_p m1 = {1,1}, m63 = {63,63};
582 
583  c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
584  c1 = VecXor(c1, VecIntelMultiply10(c0, r));
585  c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
586  c0 = VecIntelMultiply00(vec_sl(c0, m1), r);
587  c2 = VecXor(c2, c0);
588  c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
589  c1 = vec_sr(vec_mergeh(c1, c2), m63);
590  c2 = vec_sl(c2, m1);
591 
592  return VecXor(c2, c1);
593 }
594 
595 inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
596 {
597  const uint64x2_p c0 = VecIntelMultiply00(x, h);
598  const uint64x2_p c1 = VecXor(VecIntelMultiply01(x, h), VecIntelMultiply10(x, h));
599  const uint64x2_p c2 = VecIntelMultiply11(x, h);
600 
601  return GCM_Reduce_VMULL(c0, c1, c2, r);
602 }
603 
604 inline uint64x2_p LoadHashKey(const byte *hashKey)
605 {
606 #if (CRYPTOPP_BIG_ENDIAN)
607  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
608  const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
609  return VecPermute(key, key, mask);
610 #else
611  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
612  const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
613  return VecPermute(key, key, mask);
614 #endif
615 }
616 
617 void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
618 {
619  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
620  uint64x2_p h = LoadHashKey(hashKey), h0 = h;
621 
622  unsigned int i;
623  uint64_t temp[2];
624 
625  for (i=0; i<tableSize-32; i+=32)
626  {
627  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
628  VecStore(h, (byte*)temp);
629  std::memcpy(mulTable+i, temp+0, 8);
630  VecStore(h1, mulTable+i+16);
631  VecStore(h, mulTable+i+8);
632  VecStore(h1, (byte*)temp);
633  std::memcpy(mulTable+i+8, temp+0, 8);
634  h = GCM_Multiply_VMULL(h1, h0, r);
635  }
636 
637  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
638  VecStore(h, (byte*)temp);
639  std::memcpy(mulTable+i, temp+0, 8);
640  VecStore(h1, mulTable+i+16);
641  VecStore(h, mulTable+i+8);
642  VecStore(h1, (byte*)temp);
643  std::memcpy(mulTable+i+8, temp+0, 8);
644 }
645 
646 // Swaps high and low 64-bit words
647 template <class T>
648 inline T SwapWords(const T& data)
649 {
650  return (T)VecRotateLeftOctet<8>(data);
651 }
652 
653 inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
654 {
655 #if (CRYPTOPP_BIG_ENDIAN)
656  return (uint64x2_p)VecLoad(dataBuffer);
657 #else
658  const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
659  const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
660  return VecPermute(data, data, mask);
661 #endif
662 }
663 
664 inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
665 {
666 #if (CRYPTOPP_BIG_ENDIAN)
667  return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
668 #else
669  return (uint64x2_p)VecLoadBE(dataBuffer);
670 #endif
671 }
672 
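// Same structure as the CLMUL and PMULL authenticate routines above: up to
// eight blocks per pass, accumulated into c0/c1/c2 and reduced once per group.
// LoadBuffer1 and LoadBuffer2 supply the endian permutations needed on
// little-endian and big-endian targets.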
673 size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
674 {
675  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
676  uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);
677 
678  while (len >= 16)
679  {
680  size_t i=0, s = UnsignedMin(len/16, 8U);
681  uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
682  uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};
683 
684  while (true)
685  {
686  const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
687  const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
688  const uint64x2_p h2 = (uint64x2_p)VecXor(h0, h1);
689 
690  if (++i == s)
691  {
692  d1 = LoadBuffer2(data);
693  d1 = VecXor(d1, x);
694  c0 = VecXor(c0, VecIntelMultiply00(d1, h0));
695  c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
696  d1 = VecXor(d1, SwapWords(d1));
697  c1 = VecXor(c1, VecIntelMultiply00(d1, h2));
698  break;
699  }
700 
701  d1 = LoadBuffer1(data+(s-i)*16-8);
702  c0 = VecXor(c0, VecIntelMultiply01(d2, h0));
703  c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
704  d2 = VecXor(d2, d1);
705  c1 = VecXor(c1, VecIntelMultiply01(d2, h2));
706 
707  if (++i == s)
708  {
709  d1 = LoadBuffer2(data);
710  d1 = VecXor(d1, x);
711  c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
712  c2 = VecXor(c2, VecIntelMultiply11(d1, h1));
713  d1 = VecXor(d1, SwapWords(d1));
714  c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
715  break;
716  }
717 
718  d2 = LoadBuffer2(data+(s-i)*16-8);
719  c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
720  c2 = VecXor(c2, VecIntelMultiply10(d2, h1));
721  d1 = VecXor(d1, d2);
722  c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
723  }
724  data += s*16;
725  len -= s*16;
726 
727  c1 = VecXor(VecXor(c1, c0), c2);
728  x = GCM_Reduce_VMULL(c0, c1, c2, r);
729  }
730 
731  VecStore(x, hbuffer);
732  return len;
733 }
734 
735 void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
736 {
737  const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
738  VecStore(VecPermute(VecLoad(hashBuffer), mask), hashBuffer);
739 }
740 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
741 
742 NAMESPACE_END
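The routines in this file are internal helpers: gcm.cpp probes the CPU at runtime and dispatches to the CLMUL, PMULL or VMULL paths above. For orientation only, here is a minimal sketch of the public API that ultimately drives this code; it assumes the usual Crypto++ headers and is not part of gcm_simd.cpp.

#include "cryptlib.h"
#include "secblock.h"
#include "aes.h"
#include "gcm.h"
#include "osrng.h"
#include "filters.h"
#include <string>

int main()
{
    using namespace CryptoPP;

    AutoSeededRandomPool prng;
    SecByteBlock key(AES::DEFAULT_KEYLENGTH);
    CryptoPP::byte iv[12];
    prng.GenerateBlock(key, key.size());
    prng.GenerateBlock(iv, sizeof(iv));

    std::string plain = "GCM test message", cipher;

    GCM<AES>::Encryption enc;
    enc.SetKeyWithIV(key, key.size(), iv, sizeof(iv));

    // AuthenticatedEncryptionFilter appends the 16-byte tag to the ciphertext.
    StringSource ss(plain, true,
        new AuthenticatedEncryptionFilter(enc,
            new StringSink(cipher)));

    return 0;
}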
Referenced files and symbols (Doxygen cross-references)

arm_simd.h: Support functions for ARM and vector operations.
ppc_simd.h: Support functions for PowerPC and vector operations.
pch.h: Precompiled header file.
misc.h: Utility functions for the Crypto++ library.
config.h: Library configuration file.
CryptoPP: Crypto++ library namespace.
word64: unsigned long long word64. 64-bit unsigned datatype. Definition: config_int.h:91
W64LIT: #define W64LIT(x). Declare an unsigned word64. Definition: config_int.h:119
BIG_ENDIAN_ORDER: byte order is big-endian (ByteOrder enumerator). Definition: cryptlib.h:147
GetNativeByteOrder: ByteOrder GetNativeByteOrder(). Returns NativeByteOrder as an enumerated ByteOrder value. Definition: misc.h:1243
ByteReverse: byte ByteReverse(byte value). Reverses bytes in an 8-bit value. Definition: misc.h:2001
UnsignedMin: const T1 UnsignedMin(const T1 &a, const T2 &b). Safe comparison of values that could be negative and incorrectly promoted. Definition: misc.h:674
PutBlock: Access a block of memory. Definition: misc.h:2550
BlockGetAndPut: Access a block of memory. Definition: misc.h:2587
PolynomialMod2: Polynomial with coefficients in GF(2). Definition: gf2n.h:27
M128_CAST: #define M128_CAST(x). Clang workaround. Definition: adv_simd.h:609
CONST_M128_CAST: #define CONST_M128_CAST(x). Clang workaround. Definition: adv_simd.h:614
PMULL_00: uint64x2_t PMULL_00(const uint64x2_t a, const uint64x2_t b). Polynomial multiplication. Definition: arm_simd.h:34
PMULL_01: uint64x2_t PMULL_01(const uint64x2_t a, const uint64x2_t b). Polynomial multiplication. Definition: arm_simd.h:64
PMULL_10: uint64x2_t PMULL_10(const uint64x2_t a, const uint64x2_t b). Polynomial multiplication. Definition: arm_simd.h:94
PMULL_11: uint64x2_t PMULL_11(const uint64x2_t a, const uint64x2_t b). Polynomial multiplication. Definition: arm_simd.h:124
uint8x16_p: __vector unsigned char uint8x16_p. Vector of 8-bit elements. Definition: ppc_simd.h:192
uint32x4_p: __vector unsigned int uint32x4_p. Vector of 32-bit elements. Definition: ppc_simd.h:202
uint64x2_p: __vector unsigned long long uint64x2_p. Vector of 64-bit elements. Definition: ppc_simd.h:212
VecLoad: uint32x4_p VecLoad(const byte src[16]). Loads a vector from a byte array. Definition: ppc_simd.h:369
VecLoadBE: uint32x4_p VecLoadBE(const byte src[16]). Loads a vector from a byte array. Definition: ppc_simd.h:742
VecStore: void VecStore(const T data, byte dest[16]). Stores a vector to a byte array. Definition: ppc_simd.h:895
VecXor: T1 VecXor(const T1 vec1, const T2 vec2). XOR two vectors. Definition: ppc_simd.h:1414
VecPermute: T1 VecPermute(const T1 vec, const T2 mask). Permutes a vector. Definition: ppc_simd.h:1478
VecEqual: bool VecEqual(const T1 vec1, const T2 vec2). Compare two vectors. Definition: ppc_simd.h:1975
VecIntelMultiply00: uint64x2_p VecIntelMultiply00(const uint64x2_p &a, const uint64x2_p &b). Polynomial multiplication. Definition: ppc_simd.h:2517
VecIntelMultiply01: uint64x2_p VecIntelMultiply01(const uint64x2_p &a, const uint64x2_p &b). Polynomial multiplication. Definition: ppc_simd.h:2539
VecIntelMultiply10: uint64x2_p VecIntelMultiply10(const uint64x2_p &a, const uint64x2_p &b). Polynomial multiplication. Definition: ppc_simd.h:2561
VecIntelMultiply11: uint64x2_p VecIntelMultiply11(const uint64x2_p &a, const uint64x2_p &b). Polynomial multiplication. Definition: ppc_simd.h:2583