/*
 * Carry-less multiply operations.
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2023 Linaro, Ltd.
 */

#ifndef CRYPTO_CLMUL_H
#define CRYPTO_CLMUL_H

#include "qemu/int128.h"
#include "host/crypto/clmul.h"

/**
 * clmul_8x8_low:
 *
 * Perform eight 8x8->8 carry-less multiplies.
 */
uint64_t clmul_8x8_low(uint64_t, uint64_t);

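/*
 * Illustrative sketch only, not part of the QEMU API: a scalar
 * 8x8->16 carry-less multiply, included to make the term
 * "carry-less" concrete -- partial products are combined with XOR
 * rather than ADD, i.e. multiplication of polynomials over GF(2).
 * The helper name clmul_8x8_example is hypothetical.
 */
static inline uint16_t clmul_8x8_example(uint8_t a, uint8_t b)
{
    uint16_t r = 0;

    for (int i = 0; i < 8; i++) {
        if (b & (1u << i)) {
            r ^= (uint16_t)a << i;   /* XOR in the shifted partial product */
        }
    }
    return r;
}
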
/**
 * clmul_8x4_even:
 *
 * Perform four 8x8->16 carry-less multiplies.
 * The odd bytes of the inputs are ignored.
 */
uint64_t clmul_8x4_even(uint64_t, uint64_t);

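/*
 * Illustrative sketch only: one plausible per-lane expansion of
 * clmul_8x4_even, following the documentation above -- each 16-bit
 * result lane holds the carry-less product of the even (low) bytes
 * of the corresponding input lanes.  QEMU's real implementation may
 * differ; this reuses the hypothetical clmul_8x8_example above.
 */
static inline uint64_t clmul_8x4_even_example(uint64_t n, uint64_t m)
{
    uint64_t r = 0;

    for (int i = 0; i < 64; i += 16) {
        uint8_t a = n >> i;          /* even byte of this 16-bit lane */
        uint8_t b = m >> i;
        r |= (uint64_t)clmul_8x8_example(a, b) << i;
    }
    return r;
}
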
/**
 * clmul_8x4_odd:
 *
 * Perform four 8x8->16 carry-less multiplies.
 * The even bytes of the inputs are ignored.
 */
uint64_t clmul_8x4_odd(uint64_t, uint64_t);

/**
 * clmul_8x4_packed:
 *
 * Perform four 8x8->16 carry-less multiplies.
 */
uint64_t clmul_8x4_packed(uint32_t, uint32_t);

/**
 * clmul_16x2_even:
 *
 * Perform two 16x16->32 carry-less multiplies.
 * The odd words of the inputs are ignored.
 */
uint64_t clmul_16x2_even(uint64_t, uint64_t);

/**
 * clmul_16x2_odd:
 *
 * Perform two 16x16->32 carry-less multiplies.
 * The even words of the inputs are ignored.
 */
uint64_t clmul_16x2_odd(uint64_t, uint64_t);

/**
 * clmul_32:
 *
 * Perform a 32x32->64 carry-less multiply.
 */
uint64_t clmul_32(uint32_t, uint32_t);

/**
 * clmul_64:
 *
 * Perform a 64x64->128 carry-less multiply.
 */
Int128 clmul_64_gen(uint64_t, uint64_t);

static inline Int128 clmul_64(uint64_t a, uint64_t b)
{
    if (HAVE_CLMUL_ACCEL) {
        return clmul_64_accel(a, b);
    } else {
        return clmul_64_gen(a, b);
    }
}
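
/*
 * Usage sketch, not part of this header's API: split the 128-bit
 * carry-less product into 64-bit halves with the Int128 accessors
 * from "qemu/int128.h".  The helper name is hypothetical.
 */
static inline void clmul_64_split_example(uint64_t a, uint64_t b,
                                          uint64_t *lo, uint64_t *hi)
{
    Int128 r = clmul_64(a, b);

    *lo = int128_getlo(r);
    *hi = (uint64_t)int128_gethi(r);
}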

#endif /* CRYPTO_CLMUL_H */