#if (CRYPTOPP_SSSE3_AVAILABLE)
# include "adv_simd.h"
# include <pmmintrin.h>
# include <tmmintrin.h>
#endif

#if defined(__XOP__)
# include <ammintrin.h>
# if defined(__GNUC__)
#  include <x86intrin.h>
# endif
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include "adv_simd.h"
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "adv_simd.h"
# include "ppc_simd.h"
#endif
// Squash MS LNK4221 and libtool warnings
extern const char SPECK128_SIMD_FNAME[] = __FILE__;

ANONYMOUS_NAMESPACE_BEGIN

using CryptoPP::byte;
using CryptoPP::word32;
using CryptoPP::word64;

// *************************** ARM NEON ************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
// Missing from Microsoft's ARM A-32 implementation
#if defined(_MSC_VER) && !defined(_M_ARM64)
inline uint64x2_t vld1q_dup_u64(const uint64_t* ptr)
{
    return vmovq_n_u64(*ptr);
}
#endif
template <class T>
inline T UnpackHigh64(const T& a, const T& b)
{
    const uint64x1_t x(vget_high_u64((uint64x2_t)a));
    const uint64x1_t y(vget_high_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <class T>
inline T UnpackLow64(const T& a, const T& b)
{
    const uint64x1_t x(vget_low_u64((uint64x2_t)a));
    const uint64x1_t y(vget_low_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <unsigned int R>
inline uint64x2_t RotateLeft64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, R));
    const uint64x2_t b(vshrq_n_u64(val, 64 - R));
    return vorrq_u64(a, b);
}
template <unsigned int R>
inline uint64x2_t RotateRight64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, 64 - R));
    const uint64x2_t b(vshrq_n_u64(val, R));
    return vorrq_u64(a, b);
}
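// A rotate by 8 moves whole bytes, so on AArch32/AArch64 the specializations
// below use a single TBL byte shuffle (vqtbl1q_u8) instead of two shifts and an OR.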
#if defined(__aarch32__) || defined(__aarch64__)
template <>
inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
template <>
inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
#endif  // __aarch32__ or __aarch64__
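// SPECK-128 encryption round, applied to two blocks at once. Each vector
// holds the same half (x or y) of two adjacent blocks:
//   x = (ROTR64(x, 8) + y) ^ k
//   y =  ROTL64(y, 3) ^ x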
inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        x1 = RotateRight64<8>(x1);
        x1 = vaddq_u64(x1, y1);
        x1 = veorq_u64(x1, rk);
        y1 = RotateLeft64<3>(y1);
        y1 = veorq_u64(y1, x1);
    }

    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
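// Same round function over three x/y vector pairs (six blocks). Interleaving
// three independent streams helps hide the latency of the vector instructions.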
inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        x1 = RotateRight64<8>(x1);
        x2 = RotateRight64<8>(x2);
        x3 = RotateRight64<8>(x3);
        x1 = vaddq_u64(x1, y1);
        x2 = vaddq_u64(x2, y2);
        x3 = vaddq_u64(x3, y3);
        x1 = veorq_u64(x1, rk);
        x2 = veorq_u64(x2, rk);
        x3 = veorq_u64(x3, rk);
        y1 = RotateLeft64<3>(y1);
        y2 = RotateLeft64<3>(y2);
        y3 = RotateLeft64<3>(y3);
        y1 = veorq_u64(y1, x1);
        y2 = veorq_u64(y2, x2);
        y3 = veorq_u64(y3, x3);
    }

    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
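// SPECK-128 decryption round: the inverse operations in reverse order, with
// the round keys consumed from last to first:
//   y = ROTR64(y ^ x, 3)
//   x = ROTL64((x ^ k) - y, 8)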
inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        y1 = veorq_u64(y1, x1);
        y1 = RotateRight64<3>(y1);
        x1 = veorq_u64(x1, rk);
        x1 = vsubq_u64(x1, y1);
        x1 = RotateLeft64<8>(x1);
    }

    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+i);

        y1 = veorq_u64(y1, x1);
        y2 = veorq_u64(y2, x2);
        y3 = veorq_u64(y3, x3);
        y1 = RotateRight64<3>(y1);
        y2 = RotateRight64<3>(y2);
        y3 = RotateRight64<3>(y3);
        x1 = veorq_u64(x1, rk);
        x2 = veorq_u64(x2, rk);
        x3 = veorq_u64(x3, rk);
        x1 = vsubq_u64(x1, y1);
        x2 = vsubq_u64(x2, y2);
        x3 = vsubq_u64(x3, y3);
        x1 = RotateLeft64<8>(x1);
        x2 = RotateLeft64<8>(x2);
        x3 = RotateLeft64<8>(x3);
    }

    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE
// ***************************** IA-32 ***************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE)

// Clang intrinsic casts
#ifndef M128_CAST
# define M128_CAST(x) ((__m128i *)(void *)(x))
#endif
#ifndef CONST_M128_CAST
# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
#endif

#ifndef DOUBLE_CAST
# define DOUBLE_CAST(x) ((double *)(void *)(x))
#endif
#ifndef CONST_DOUBLE_CAST
# define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
#endif
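// When AMD XOP is available (__XOP__), _mm_roti_epi64 rotates a 64-bit lane
// in a single instruction; otherwise the rotate is built from two shifts and an OR.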
template <unsigned int R>
inline __m128i RotateLeft64(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, R), _mm_srli_epi64(val, 64-R));
#endif
}
template <unsigned int R>
inline __m128i RotateRight64(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, 64-R), _mm_srli_epi64(val, R));
#endif
}
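// A rotate by 8 moves whole bytes, so without XOP it can be done with a
// single PSHUFB byte shuffle instead of shifts.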
template <> inline
__m128i RotateLeft64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 8);
#else
    const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
    return _mm_shuffle_epi8(val, mask);
#endif
}
template <> inline
__m128i RotateRight64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-8);
#else
    const __m128i mask = _mm_set_epi8(8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1);
    return _mm_shuffle_epi8(val, mask);
#endif
}
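// The SSE routines mirror the NEON ones above. Each __m128i packs the same
// half (x or y) of two blocks, and _mm_loaddup_pd broadcasts a 64-bit round
// key into both lanes.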
inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        x1 = RotateRight64<8>(x1);
        x1 = _mm_add_epi64(x1, y1);
        x1 = _mm_xor_si128(x1, rk);
        y1 = RotateLeft64<3>(y1);
        y1 = _mm_xor_si128(y1, x1);
    }

    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        x1 = RotateRight64<8>(x1);
        x2 = RotateRight64<8>(x2);
        x3 = RotateRight64<8>(x3);
        x1 = _mm_add_epi64(x1, y1);
        x2 = _mm_add_epi64(x2, y2);
        x3 = _mm_add_epi64(x3, y3);
        x1 = _mm_xor_si128(x1, rk);
        x2 = _mm_xor_si128(x2, rk);
        x3 = _mm_xor_si128(x3, rk);
        y1 = RotateLeft64<3>(y1);
        y2 = RotateLeft64<3>(y2);
        y3 = RotateLeft64<3>(y3);
        y1 = _mm_xor_si128(y1, x1);
        y2 = _mm_xor_si128(y2, x2);
        y3 = _mm_xor_si128(y3, x3);
    }

    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        y1 = _mm_xor_si128(y1, x1);
        y1 = RotateRight64<3>(y1);
        x1 = _mm_xor_si128(x1, rk);
        x1 = _mm_sub_epi64(x1, y1);
        x1 = RotateLeft64<8>(x1);
    }

    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));

        y1 = _mm_xor_si128(y1, x1);
        y2 = _mm_xor_si128(y2, x2);
        y3 = _mm_xor_si128(y3, x3);
        y1 = RotateRight64<3>(y1);
        y2 = RotateRight64<3>(y2);
        y3 = RotateRight64<3>(y3);
        x1 = _mm_xor_si128(x1, rk);
        x2 = _mm_xor_si128(x2, rk);
        x3 = _mm_xor_si128(x3, rk);
        x1 = _mm_sub_epi64(x1, y1);
        x2 = _mm_sub_epi64(x2, y2);
        x3 = _mm_sub_epi64(x3, y3);
        x1 = RotateLeft64<8>(x1);
        x2 = RotateLeft64<8>(x2);
        x3 = RotateLeft64<8>(x3);
    }

    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE
// ***************************** Altivec ***************************** //

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)

// POWER8 and above provide native 64-bit vector elements. On older machines
// the 64-bit operations are emulated over 32-bit elements by the VecAdd64,
// VecSub64 and VecRotate*64 helpers in ppc_simd.h.

using CryptoPP::uint8x16_p;
using CryptoPP::uint32x4_p;
#if defined(_ARCH_PWR8)
using CryptoPP::uint64x2_p;
#endif

using CryptoPP::VecAdd64;
using CryptoPP::VecSub64;
using CryptoPP::VecXor64;
using CryptoPP::VecRotateLeft64;
using CryptoPP::VecRotateRight64;
using CryptoPP::VecLoadAligned;
using CryptoPP::VecPermute;

#if defined(_ARCH_PWR8)
#define speck128_t uint64x2_p
#else
#define speck128_t uint32x4_p
#endif
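// The permute masks in the block routines below both de-interleave the x/y
// halves of the input blocks and put the bytes into the lane order the 64-bit
// vector arithmetic expects, so no separate endian conversion is required.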
void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    speck128_t x1 = (speck128_t)VecPermute(block, block, m1);
    speck128_t y1 = (speck128_t)VecPermute(block, block, m2);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        // Each 64-bit subkey is stored twice, so it fills a whole vector
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        x1 = (speck128_t)VecRotateRight64<8>(x1);
        x1 = (speck128_t)VecAdd64(x1, y1);
        x1 = (speck128_t)VecXor64(x1, rk);

        y1 = (speck128_t)VecRotateLeft64<3>(y1);
        y1 = (speck128_t)VecXor64(y1, x1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
void SPECK128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    speck128_t x1 = (speck128_t)VecPermute(block, block, m1);
    speck128_t y1 = (speck128_t)VecPermute(block, block, m2);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        y1 = (speck128_t)VecXor64(y1, x1);
        y1 = (speck128_t)VecRotateRight64<3>(y1);

        x1 = (speck128_t)VecXor64(x1, rk);
        x1 = (speck128_t)VecSub64(x1, y1);
        x1 = (speck128_t)VecRotateLeft64<8>(x1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
void SPECK128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    speck128_t x1 = (speck128_t)VecPermute(block0, block1, m1);
    speck128_t y1 = (speck128_t)VecPermute(block0, block1, m2);
    speck128_t x2 = (speck128_t)VecPermute(block2, block3, m1);
    speck128_t y2 = (speck128_t)VecPermute(block2, block3, m2);
    speck128_t x3 = (speck128_t)VecPermute(block4, block5, m1);
    speck128_t y3 = (speck128_t)VecPermute(block4, block5, m2);

    for (size_t i=0; i < static_cast<size_t>(rounds); ++i)
    {
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        x1 = (speck128_t)VecRotateRight64<8>(x1);
        x2 = (speck128_t)VecRotateRight64<8>(x2);
        x3 = (speck128_t)VecRotateRight64<8>(x3);
        x1 = (speck128_t)VecAdd64(x1, y1);
        x2 = (speck128_t)VecAdd64(x2, y2);
        x3 = (speck128_t)VecAdd64(x3, y3);
        x1 = (speck128_t)VecXor64(x1, rk);
        x2 = (speck128_t)VecXor64(x2, rk);
        x3 = (speck128_t)VecXor64(x3, rk);

        y1 = (speck128_t)VecRotateLeft64<3>(y1);
        y2 = (speck128_t)VecRotateLeft64<3>(y2);
        y3 = (speck128_t)VecRotateLeft64<3>(y3);
        y1 = (speck128_t)VecXor64(y1, x1);
        y2 = (speck128_t)VecXor64(y2, x2);
        y3 = (speck128_t)VecXor64(y3, x3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
void SPECK128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    speck128_t x1 = (speck128_t)VecPermute(block0, block1, m1);
    speck128_t y1 = (speck128_t)VecPermute(block0, block1, m2);
    speck128_t x2 = (speck128_t)VecPermute(block2, block3, m1);
    speck128_t y2 = (speck128_t)VecPermute(block2, block3, m2);
    speck128_t x3 = (speck128_t)VecPermute(block4, block5, m1);
    speck128_t y3 = (speck128_t)VecPermute(block4, block5, m2);

    for (int i = static_cast<int>(rounds-1); i >= 0; --i)
    {
        const word32* ptr = reinterpret_cast<const word32*>(subkeys+i*2);
        const speck128_t rk = (speck128_t)VecLoadAligned(ptr);

        y1 = (speck128_t)VecXor64(y1, x1);
        y2 = (speck128_t)VecXor64(y2, x2);
        y3 = (speck128_t)VecXor64(y3, x3);
        y1 = (speck128_t)VecRotateRight64<3>(y1);
        y2 = (speck128_t)VecRotateRight64<3>(y2);
        y3 = (speck128_t)VecRotateRight64<3>(y3);

        x1 = (speck128_t)VecXor64(x1, rk);
        x2 = (speck128_t)VecXor64(x2, rk);
        x3 = (speck128_t)VecXor64(x3, rk);
        x1 = (speck128_t)VecSub64(x1, y1);
        x2 = (speck128_t)VecSub64(x2, y2);
        x3 = (speck128_t)VecSub64(x3, y3);
        x1 = (speck128_t)VecRotateLeft64<8>(x1);
        x2 = (speck128_t)VecRotateLeft64<8>(x2);
        x3 = (speck128_t)VecRotateLeft64<8>(x3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
#endif  // CRYPTOPP_ALTIVEC_AVAILABLE
ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)
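// The exported functions below hand the per-block routines to the
// AdvancedProcessBlocks128_* templates in adv_simd.h, which walk the input a
// few blocks at a time and apply the XOR/in-place handling requested in flags.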
#if (CRYPTOPP_ARM_NEON_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
size_t SPECK128_Dec_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE
#if (CRYPTOPP_SSSE3_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
size_t SPECK128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE
#if (CRYPTOPP_ALTIVEC_AVAILABLE)
size_t SPECK128_Enc_AdvancedProcessBlocks_ALTIVEC(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
size_t SPECK128_Dec_AdvancedProcessBlocks_ALTIVEC(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_ALTIVEC_AVAILABLE

NAMESPACE_END