diff --git a/target/linux/ath79/patches-4.19/998-fix-ecc-bug.patch b/target/linux/ath79/patches-4.19/998-fix-ecc-bug.patch
new file mode 100644
index 00000000000..fbc470a8b2e
--- /dev/null
+++ b/target/linux/ath79/patches-4.19/998-fix-ecc-bug.patch
@@ -0,0 +1,871 @@
+--- linux-4.19.82/crypto/ecc.c	2019-11-06 12:06:31.000000000 +0000
++++ linux-4.19.82/crypto/ecc.c	2022-04-06 23:45:33.778283892 +0100
+@@ -1,6 +1,6 @@
+ /*
+- * Copyright (c) 2013, Kenneth MacKay
+- * All rights reserved.
++ * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
++ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+@@ -24,12 +24,15 @@
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/slab.h>
+ #include <linux/swab.h>
+ #include <linux/fips.h>
+ #include <crypto/ecdh.h>
+ #include <crypto/rng.h>
++#include <asm/unaligned.h>
++#include <linux/ratelimit.h>
+
+ #include "ecc.h"
+ #include "ecc_curve_defs.h"
+@@ -112,7 +115,7 @@
+ }
+
+ /* Returns true if vli == 0, false otherwise. */
+-static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
++bool vli_is_zero(const u64 *vli, unsigned int ndigits)
+ {
+ 	int i;
+
+@@ -123,6 +126,7 @@
+
+ 	return true;
+ }
++EXPORT_SYMBOL(vli_is_zero);
+
+ /* Returns nonzero if bit bit of vli is set. */
+ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
+@@ -130,6 +134,11 @@
+ 	return (vli[bit / 64] & ((u64)1 << (bit % 64)));
+ }
+
++static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
++{
++	return vli_test_bit(vli, ndigits * 64 - 1);
++}
++
+ /* Counts the number of 64-bit "digits" in vli. */
+ static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
+ {
+@@ -161,6 +170,27 @@
+ 	return ((num_digits - 1) * 64 + i);
+ }
+
++/* Set dest from unaligned bit string src. */
++void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
++{
++	int i;
++	const u64 *from = src;
++
++	for (i = 0; i < ndigits; i++)
++		dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
++}
++EXPORT_SYMBOL(vli_from_be64);
++
++void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
++{
++	int i;
++	const u64 *from = src;
++
++	for (i = 0; i < ndigits; i++)
++		dest[i] = get_unaligned_le64(&from[i]);
++}
++EXPORT_SYMBOL(vli_from_le64);
++
+ /* Sets dest = src. */
+ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
+ {
+@@ -171,7 +201,7 @@
+ }
+
+ /* Returns sign of left - right. */
+-static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
++int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
+ {
+ 	int i;
+
+@@ -184,6 +214,7 @@
+
+ 	return 0;
+ }
++EXPORT_SYMBOL(vli_cmp);
+
+ /* Computes result = in << c, returning carry. Can modify in place
+ * (if result == in). 0 < shift < 64.
+ */
+@@ -239,8 +270,30 @@
+ 	return carry;
+ }
+
++/* Computes result = left + right, returning carry. Can modify in place. */
++static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
++		    unsigned int ndigits)
++{
++	u64 carry = right;
++	int i;
++
++	for (i = 0; i < ndigits; i++) {
++		u64 sum;
++
++		sum = left[i] + carry;
++		if (sum != left[i])
++			carry = (sum < left[i]);
++		else
++			carry = !!carry;
++
++		result[i] = sum;
++	}
++
++	return carry;
++}
++
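(Illustrative aside, not part of the patch: vli_uadd() above folds a single u64 addend into a little-endian digit array, detecting per-digit overflow by comparing the wrapped sum with the original digit. A minimal user-space sketch of the same carry propagation, with a self-check in main():)

#include <assert.h>
#include <stdint.h>

/* Add a single 64-bit addend into a little-endian multi-word integer,
 * mirroring the carry handling of vli_uadd() in the patch above.
 * Returns the final carry out of the top digit. */
static uint64_t uadd_sketch(uint64_t *digits, unsigned int ndigits,
			    uint64_t addend)
{
	uint64_t carry = addend;
	unsigned int i;

	for (i = 0; i < ndigits; i++) {
		uint64_t sum = digits[i] + carry;

		/* Unsigned addition wrapped iff the sum dropped below an operand. */
		carry = (sum < digits[i]);
		digits[i] = sum;
	}
	return carry;
}

int main(void)
{
	uint64_t x[2] = { ~0ull, ~0ull };	/* 2^128 - 1 */

	/* Adding 1 must ripple through both digits and carry out the top. */
	assert(uadd_sketch(x, 2, 1) == 1);
	assert(x[0] == 0 && x[1] == 0);
	return 0;
}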
+ /* Computes result = left - right, returning borrow. Can modify in place. */
+-static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
++u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ 		unsigned int ndigits)
+ {
+ 	u64 borrow = 0;
+@@ -258,9 +311,37 @@
+
+ 	return borrow;
+ }
++EXPORT_SYMBOL(vli_sub);
++
++/* Computes result = left - right, returning borrow. Can modify in place. */
++static u64 vli_usub(u64 *result, const u64 *left, u64 right,
++		    unsigned int ndigits)
++{
++	u64 borrow = right;
++	int i;
++
++	for (i = 0; i < ndigits; i++) {
++		u64 diff;
++
++		diff = left[i] - borrow;
++		if (diff != left[i])
++			borrow = (diff > left[i]);
++
++		result[i] = diff;
++	}
++
++	return borrow;
++}
+
+ static uint128_t mul_64_64(u64 left, u64 right)
+ {
++	uint128_t result;
++#if defined(CONFIG_ARCH_SUPPORTS_INT128)
++	unsigned __int128 m = (unsigned __int128)left * right;
++
++	result.m_low = m;
++	result.m_high = m >> 64;
++#else
+ 	u64 a0 = left & 0xffffffffull;
+ 	u64 a1 = left >> 32;
+ 	u64 b0 = right & 0xffffffffull;
+@@ -269,7 +350,6 @@
+ 	u64 m1 = a0 * b1;
+ 	u64 m2 = a1 * b0;
+ 	u64 m3 = a1 * b1;
+-	uint128_t result;
+
+ 	m2 += (m0 >> 32);
+ 	m2 += m1;
+@@ -280,7 +360,7 @@
+
+ 	result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
+ 	result.m_high = m3 + (m2 >> 32);
+-
++#endif
+ 	return result;
+ }
+
+@@ -330,6 +410,28 @@
+ 	result[ndigits * 2 - 1] = r01.m_low;
+ }
+
++/* Compute product = left * right, for a small right value. */
++static void vli_umult(u64 *result, const u64 *left, u32 right,
++		      unsigned int ndigits)
++{
++	uint128_t r01 = { 0 };
++	unsigned int k;
++
++	for (k = 0; k < ndigits; k++) {
++		uint128_t product;
++
++		product = mul_64_64(left[k], right);
++		r01 = add_128_128(r01, product);
++		/* no carry */
++		result[k] = r01.m_low;
++		r01.m_low = r01.m_high;
++		r01.m_high = 0;
++	}
++	result[k] = r01.m_low;
++	for (++k; k < ndigits * 2; k++)
++		result[k] = 0;
++}
++
+ static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
+ {
+ 	uint128_t r01 = { 0, 0 };
+@@ -402,6 +504,170 @@
+ 	vli_add(result, result, mod, ndigits);
+ }
+
++/*
++ * Computes result = product % mod
++ * for special form moduli: p = 2^k-c, for small c (note the minus sign)
++ *
++ * References:
++ * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
++ * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
++ * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
++ */
++static void vli_mmod_special(u64 *result, const u64 *product,
++			      const u64 *mod, unsigned int ndigits)
++{
++	u64 c = -mod[0];
++	u64 t[ECC_MAX_DIGITS * 2];
++	u64 r[ECC_MAX_DIGITS * 2];
++
++	vli_set(r, product, ndigits * 2);
++	while (!vli_is_zero(r + ndigits, ndigits)) {
++		vli_umult(t, r + ndigits, c, ndigits);
++		vli_clear(r + ndigits, ndigits);
++		vli_add(r, r, t, ndigits * 2);
++	}
++	vli_set(t, mod, ndigits);
++	vli_clear(t + ndigits, ndigits);
++	while (vli_cmp(r, t, ndigits * 2) >= 0)
++		vli_sub(r, r, t, ndigits * 2);
++	vli_set(result, r, ndigits);
++}
++
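(Illustrative aside, not part of the patch: vli_mmod_special() above is the folding reduction for pseudo-Mersenne moduli p = 2^k - c. The high half of the product, multiplied by c, is congruent to that half shifted down k bits, so it can be folded into the low half until it vanishes. A single-word sketch, assuming a GCC/Clang 64-bit target with unsigned __int128, using the largest 64-bit prime 2^64 - 59:)

#include <assert.h>
#include <stdint.h>

#define C 59ull	/* p = 2^64 - 59, the largest 64-bit prime */

/* Fold-based reduction: hi * 2^64 + lo == hi * C + lo (mod 2^64 - C). */
static uint64_t mod_pseudo_mersenne(unsigned __int128 x)
{
	const uint64_t p = (uint64_t)0 - C;	/* 2^64 - C as a u64 */

	while ((uint64_t)(x >> 64))
		x = (unsigned __int128)(uint64_t)(x >> 64) * C + (uint64_t)x;
	/* One conditional subtraction finishes, like the final loop above. */
	return (uint64_t)x >= p ? (uint64_t)x - p : (uint64_t)x;
}

int main(void)
{
	unsigned __int128 prod =
		(unsigned __int128)0x123456789abcdef0ull * 0xfedcba9876543210ull;
	unsigned __int128 p128 = ((unsigned __int128)1 << 64) - C;

	/* Cross-check against the compiler's 128-bit division. */
	assert(mod_pseudo_mersenne(prod) == (uint64_t)(prod % p128));
	return 0;
}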
++/*
++ * Computes result = product % mod
++ * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
++ * where k-1 does not fit into qword boundary by -1 bit (such as 255).
++ *
++ * References (loosely based on):
++ * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
++ * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
++ * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
++ *
++ * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
++ * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
++ * Algorithm 10.25 Fast reduction for special form moduli
++ */
++static void vli_mmod_special2(u64 *result, const u64 *product,
++			       const u64 *mod, unsigned int ndigits)
++{
++	u64 c2 = mod[0] * 2;
++	u64 q[ECC_MAX_DIGITS];
++	u64 r[ECC_MAX_DIGITS * 2];
++	u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
++	int carry; /* last bit that doesn't fit into q */
++	int i;
++
++	vli_set(m, mod, ndigits);
++	vli_clear(m + ndigits, ndigits);
++
++	vli_set(r, product, ndigits);
++	/* q and carry are top bits */
++	vli_set(q, product + ndigits, ndigits);
++	vli_clear(r + ndigits, ndigits);
++	carry = vli_is_negative(r, ndigits);
++	if (carry)
++		r[ndigits - 1] &= (1ull << 63) - 1;
++	for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
++		u64 qc[ECC_MAX_DIGITS * 2];
++
++		vli_umult(qc, q, c2, ndigits);
++		if (carry)
++			vli_uadd(qc, qc, mod[0], ndigits * 2);
++		vli_set(q, qc + ndigits, ndigits);
++		vli_clear(qc + ndigits, ndigits);
++		carry = vli_is_negative(qc, ndigits);
++		if (carry)
++			qc[ndigits - 1] &= (1ull << 63) - 1;
++		if (i & 1)
++			vli_sub(r, r, qc, ndigits * 2);
++		else
++			vli_add(r, r, qc, ndigits * 2);
++	}
++	while (vli_is_negative(r, ndigits * 2))
++		vli_add(r, r, m, ndigits * 2);
++	while (vli_cmp(r, m, ndigits * 2) >= 0)
++		vli_sub(r, r, m, ndigits * 2);
++
++	vli_set(result, r, ndigits);
++}
++
++/*
++ * Computes result = product % mod, where product is 2N words long.
++ * Reference: Ken MacKay's micro-ecc.
++ * Currently only designed to work for curve_p or curve_n.
++ */
++static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
++			  unsigned int ndigits)
++{
++	u64 mod_m[2 * ECC_MAX_DIGITS];
++	u64 tmp[2 * ECC_MAX_DIGITS];
++	u64 *v[2] = { tmp, product };
++	u64 carry = 0;
++	unsigned int i;
++	/* Shift mod so its highest set bit is at the maximum position. */
++	int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
++	int word_shift = shift / 64;
++	int bit_shift = shift % 64;
++
++	vli_clear(mod_m, word_shift);
++	if (bit_shift > 0) {
++		for (i = 0; i < ndigits; ++i) {
++			mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
++			carry = mod[i] >> (64 - bit_shift);
++		}
++	} else
++		vli_set(mod_m + word_shift, mod, ndigits);
++
++	for (i = 1; shift >= 0; --shift) {
++		u64 borrow = 0;
++		unsigned int j;
++
++		for (j = 0; j < ndigits * 2; ++j) {
++			u64 diff = v[i][j] - mod_m[j] - borrow;
++
++			if (diff != v[i][j])
++				borrow = (diff > v[i][j]);
++			v[1 - i][j] = diff;
++		}
++		i = !(i ^ borrow); /* Swap the index if there was no borrow */
++		vli_rshift1(mod_m, ndigits);
++		mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
++		vli_rshift1(mod_m + ndigits, ndigits);
++	}
++	vli_set(result, v[i], ndigits);
++}
++
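(Illustrative aside, not part of the patch: vli_mmod_slow() above is schoolbook binary long division — align the modulus's top bit with the product's top bit, then walk down one bit per step, subtracting whenever no borrow would result. The same loop on a 128-bit value and 64-bit modulus, again assuming unsigned __int128:)

#include <assert.h>
#include <stdint.h>

/* Shift-and-conditional-subtract reduction, as in vli_mmod_slow() above. */
static uint64_t mod_slow_sketch(unsigned __int128 x, uint64_t m)
{
	/* 128 - bit_length(m): lines m's top bit up with bit 127. */
	int shift = 64 + __builtin_clzll(m);
	unsigned __int128 shifted = (unsigned __int128)m << shift;
	int i;

	for (i = 0; i <= shift; i++) {
		if (x >= shifted)	/* keep the version without borrow */
			x -= shifted;
		shifted >>= 1;
	}
	return (uint64_t)x;	/* now x < m */
}

int main(void)
{
	unsigned __int128 x =
		(unsigned __int128)0xdeadbeefcafebabeull * 0x0123456789abcdefull;
	uint64_t m = 0xffffffefull;	/* arbitrary odd modulus */

	assert(mod_slow_sketch(x, m) == (uint64_t)(x % m));
	return 0;
}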
++/* Computes result = product % mod using Barrett's reduction with precomputed
++ * value mu appended to the mod after ndigits, mu = (2^{2w} / mod) and has
++ * length ndigits + 1, where mu * (2^w - 1) should not overflow ndigits
++ * boundary.
++ *
++ * Reference:
++ * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
++ * 2.4.1 Barrett's algorithm. Algorithm 2.5.
++ */
++static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
++			     unsigned int ndigits)
++{
++	u64 q[ECC_MAX_DIGITS * 2];
++	u64 r[ECC_MAX_DIGITS * 2];
++	const u64 *mu = mod + ndigits;
++
++	vli_mult(q, product + ndigits, mu, ndigits);
++	if (mu[ndigits])
++		vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
++	vli_mult(r, mod, q + ndigits, ndigits);
++	vli_sub(r, product, r, ndigits * 2);
++	while (!vli_is_zero(r + ndigits, ndigits) ||
++	       vli_cmp(r, mod, ndigits) != -1) {
++		u64 carry;
++
++		carry = vli_sub(r, r, mod, ndigits);
++		vli_usub(r + ndigits, r + ndigits, carry, ndigits);
++	}
++	vli_set(result, r, ndigits);
++}
++
+ /* Computes p_result = p_product % curve_p.
+ * See algorithm 5 and 6 from
+ * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
+@@ -509,14 +775,33 @@
+ 	}
+ }
+
+-/* Computes result = product % curve_prime
+- * from http://www.nsa.gov/ia/_files/nist-routines.pdf
+-*/
++/* Computes result = product % curve_prime for different curve_primes.
++ *
++ * Note that curve_primes are distinguished just by heuristic check and
++ * not by complete conformance check.
++ */
+ static bool vli_mmod_fast(u64 *result, u64 *product,
+ 			  const u64 *curve_prime, unsigned int ndigits)
+ {
+ 	u64 tmp[2 * ECC_MAX_DIGITS];
+
++	/* Currently, both NIST primes have -1 in lowest qword. */
++	if (curve_prime[0] != -1ull) {
++		/* Try to handle Pseudo-Mersenne primes. */
++		if (curve_prime[ndigits - 1] == -1ull) {
++			vli_mmod_special(result, product, curve_prime,
++					 ndigits);
++			return true;
++		} else if (curve_prime[ndigits - 1] == 1ull << 63 &&
++			   curve_prime[ndigits - 2] == 0) {
++			vli_mmod_special2(result, product, curve_prime,
++					  ndigits);
++			return true;
++		}
++		vli_mmod_barrett(result, product, curve_prime, ndigits);
++		return true;
++	}
++
+ 	switch (ndigits) {
+ 	case 3:
+ 		vli_mmod_fast_192(result, product, curve_prime, tmp);
+@@ -525,13 +810,26 @@
+ 		vli_mmod_fast_256(result, product, curve_prime, tmp);
+ 		break;
+ 	default:
+-		pr_err("unsupports digits size!\n");
++		pr_err_ratelimited("ecc: unsupported digits size!\n");
+ 		return false;
+ 	}
+
+ 	return true;
+ }
+
++/* Computes result = (left * right) % mod.
++ * Assumes that mod is a sufficiently large curve order.
++ */
++void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
++		       const u64 *mod, unsigned int ndigits)
++{
++	u64 product[ECC_MAX_DIGITS * 2];
++
++	vli_mult(product, left, right, ndigits);
++	vli_mmod_slow(result, product, mod, ndigits);
++}
++EXPORT_SYMBOL(vli_mod_mult_slow);
++
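(Illustrative aside, not part of the patch: vli_mmod_barrett() added above trades the division in product % mod for two multiplications by the precomputed mu = floor(2^(2w) / mod), plus a short correction loop — the estimated quotient undershoots the true one by at most 2. The same algorithm on one word, with a 32-bit modulus so the product fits 64 bits; a sketch assuming unsigned __int128 for the x * mu step:)

#include <assert.h>
#include <stdint.h>

/* Barrett reduction of x < m^2 for m < 2^32, with mu = floor(2^64 / m). */
static uint32_t barrett_sketch(uint64_t x, uint32_t m, uint64_t mu)
{
	/* q = floor(x * mu / 2^64) <= floor(x / m), short by at most 2. */
	uint64_t q = (uint64_t)(((unsigned __int128)x * mu) >> 64);
	uint64_t r = x - q * (uint64_t)m;

	while (r >= m)		/* runs at most twice */
		r -= m;
	return (uint32_t)r;
}

int main(void)
{
	uint32_t m = 0xfffffffb;	/* 2^32 - 5, prime */
	/* For odd m, floor((2^64 - 1) / m) equals floor(2^64 / m). */
	uint64_t mu = ~(uint64_t)0 / m;
	uint64_t x = (uint64_t)0xdeadbeef * 0xcafebabe;	/* < m^2, fits u64 */

	assert(barrett_sketch(x, m, mu) == (uint32_t)(x % m));
	return 0;
}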
+ /* Computes result = (left * right) % curve_prime. */
+ static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
+ 			      const u64 *curve_prime, unsigned int ndigits)
+@@ -557,7 +855,7 @@
+ * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
+ * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
+ */
+-static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
++void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ 			unsigned int ndigits)
+ {
+ 	u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
+@@ -630,6 +928,7 @@
+
+ 	vli_set(result, u, ndigits);
+ }
++EXPORT_SYMBOL(vli_mod_inv);
+
+ /* ------ Point operations ------ */
+
+@@ -903,39 +1202,133 @@
+ 	vli_set(result->y, ry[0], ndigits);
+ }
+
++/* Computes R = P + Q mod p */
++static void ecc_point_add(const struct ecc_point *result,
++			  const struct ecc_point *p, const struct ecc_point *q,
++			  const struct ecc_curve *curve)
++{
++	u64 z[ECC_MAX_DIGITS];
++	u64 px[ECC_MAX_DIGITS];
++	u64 py[ECC_MAX_DIGITS];
++	unsigned int ndigits = curve->g.ndigits;
++
++	vli_set(result->x, q->x, ndigits);
++	vli_set(result->y, q->y, ndigits);
++	vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
++	vli_set(px, p->x, ndigits);
++	vli_set(py, p->y, ndigits);
++	xycz_add(px, py, result->x, result->y, curve->p, ndigits);
++	vli_mod_inv(z, z, curve->p, ndigits);
++	apply_z(result->x, result->y, z, curve->p, ndigits);
++}
++
++/* Computes R = u1P + u2Q mod p using Shamir's trick.
++ * Based on: Kenneth MacKay's micro-ecc (2014).
++ */
++void ecc_point_mult_shamir(const struct ecc_point *result,
++			   const u64 *u1, const struct ecc_point *p,
++			   const u64 *u2, const struct ecc_point *q,
++			   const struct ecc_curve *curve)
++{
++	u64 z[ECC_MAX_DIGITS];
++	u64 sump[2][ECC_MAX_DIGITS];
++	u64 *rx = result->x;
++	u64 *ry = result->y;
++	unsigned int ndigits = curve->g.ndigits;
++	unsigned int num_bits;
++	struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
++	const struct ecc_point *points[4];
++	const struct ecc_point *point;
++	unsigned int idx;
++	int i;
++
++	ecc_point_add(&sum, p, q, curve);
++	points[0] = NULL;
++	points[1] = p;
++	points[2] = q;
++	points[3] = &sum;
++
++	num_bits = max(vli_num_bits(u1, ndigits),
++		       vli_num_bits(u2, ndigits));
++	i = num_bits - 1;
++	idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
++	point = points[idx];
++
++	vli_set(rx, point->x, ndigits);
++	vli_set(ry, point->y, ndigits);
++	vli_clear(z + 1, ndigits - 1);
++	z[0] = 1;
++
++	for (--i; i >= 0; i--) {
++		ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
++		idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
++		point = points[idx];
++		if (point) {
++			u64 tx[ECC_MAX_DIGITS];
++			u64 ty[ECC_MAX_DIGITS];
++			u64 tz[ECC_MAX_DIGITS];
++
++			vli_set(tx, point->x, ndigits);
++			vli_set(ty, point->y, ndigits);
++			apply_z(tx, ty, z, curve->p, ndigits);
++			vli_mod_sub(tz, rx, tx, curve->p, ndigits);
++			xycz_add(tx, ty, rx, ry, curve->p, ndigits);
++			vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
++		}
++	}
++	vli_mod_inv(z, z, curve->p, ndigits);
++	apply_z(rx, ry, z, curve->p, ndigits);
++}
++EXPORT_SYMBOL(ecc_point_mult_shamir);
++
+ static inline void ecc_swap_digits(const u64 *in, u64 *out,
+ 				   unsigned int ndigits)
+ {
++	const __be64 *src = (__force __be64 *)in;
+ 	int i;
+
+ 	for (i = 0; i < ndigits; i++)
+-		out[i] = __swab64(in[ndigits - 1 - i]);
++		out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+ }
+
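(Illustrative aside, not part of the patch: ecc_point_mult_shamir() above computes u1*P + u2*Q in one double-and-add ladder by precomputing P+Q and selecting among {0, P, Q, P+Q} from the pair of scalar bits at each position. The interleaving is easier to test in a multiplicative group, where the same trick computes g1^a * g2^b with a single square-and-multiply pass; a sketch assuming unsigned __int128:)

#include <assert.h>
#include <stdint.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p)
{
	return (uint64_t)((unsigned __int128)a * b % p);
}

/* One ladder for both exponents, mirroring ecc_point_mult_shamir():
 * table[idx] = g1^(idx & 1) * g2^(idx >> 1), like points[4] above. */
static uint64_t shamir_expmod(uint64_t g1, uint64_t a,
			      uint64_t g2, uint64_t b, uint64_t p)
{
	uint64_t table[4] = { 1, g1 % p, g2 % p, mulmod(g1, g2, p) };
	uint64_t r = 1;
	int i;

	for (i = 63; i >= 0; i--) {
		unsigned int idx = ((a >> i) & 1) | (((b >> i) & 1) << 1);

		r = mulmod(r, r, p);			/* "double" */
		if (idx)
			r = mulmod(r, table[idx], p);	/* "add" */
	}
	return r;
}

/* Reference: ordinary square-and-multiply, one base at a time. */
static uint64_t expmod(uint64_t g, uint64_t e, uint64_t p)
{
	uint64_t r = 1;

	for (g %= p; e; e >>= 1, g = mulmod(g, g, p))
		if (e & 1)
			r = mulmod(r, g, p);
	return r;
}

int main(void)
{
	uint64_t p = 0xffffffffffffffc5ull;	/* 2^64 - 59, prime */
	uint64_t a = 0x123456789abcdefull, b = 0xfedcba987654321ull;
	uint64_t joint = shamir_expmod(3, a, 7, b, p);
	uint64_t split = mulmod(expmod(3, a, p), expmod(7, b, p), p);

	assert(joint == split);
	return 0;
}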
+-int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+-		     const u64 *private_key, unsigned int private_key_len)
++static int __ecc_is_key_valid(const struct ecc_curve *curve,
++			      const u64 *private_key, unsigned int ndigits)
+ {
+-	int nbytes;
+-	const struct ecc_curve *curve = ecc_get_curve(curve_id);
++	u64 one[ECC_MAX_DIGITS] = { 1, };
++	u64 res[ECC_MAX_DIGITS];
+
+ 	if (!private_key)
+ 		return -EINVAL;
+
+-	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+-
+-	if (private_key_len != nbytes)
++	if (curve->g.ndigits != ndigits)
+ 		return -EINVAL;
+
+-	if (vli_is_zero(private_key, ndigits))
++	/* Make sure the private key is in the range [2, n-3]. */
++	if (vli_cmp(one, private_key, ndigits) != -1)
+ 		return -EINVAL;
+-
+-	/* Make sure the private key is in the range [1, n-1]. */
+-	if (vli_cmp(curve->n, private_key, ndigits) != 1)
++	vli_sub(res, curve->n, one, ndigits);
++	vli_sub(res, res, one, ndigits);
++	if (vli_cmp(res, private_key, ndigits) != 1)
+ 		return -EINVAL;
+
+ 	return 0;
+ }
+
++int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
++		     const u64 *private_key, unsigned int private_key_len)
++{
++	int nbytes;
++	const struct ecc_curve *curve = ecc_get_curve(curve_id);
++
++	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
++
++	if (private_key_len != nbytes)
++		return -EINVAL;
++
++	return __ecc_is_key_valid(curve, private_key, ndigits);
++}
++EXPORT_SYMBOL(ecc_is_key_valid);
++
+ /*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+@@ -979,17 +1372,15 @@
+ 	if (err)
+ 		return err;
+
+-	if (vli_is_zero(priv, ndigits))
+-		return -EINVAL;
+-
+-	/* Make sure the private key is in the range [1, n-1]. */
+-	if (vli_cmp(curve->n, priv, ndigits) != 1)
++	/* Make sure the private key is in the valid range. */
++	if (__ecc_is_key_valid(curve, priv, ndigits))
+ 		return -EINVAL;
+
+ 	ecc_swap_digits(priv, privkey, ndigits);
+
+ 	return 0;
+ }
++EXPORT_SYMBOL(ecc_gen_privkey);
+
+ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
+ 		     const u64 *private_key, u64 *public_key)
+@@ -1026,13 +1417,17 @@
+ out:
+ 	return ret;
+ }
++EXPORT_SYMBOL(ecc_make_pub_key);
+
+ /* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+-static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+-				       struct ecc_point *pk)
++int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
++				struct ecc_point *pk)
+ {
+ 	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
++	if (WARN_ON(pk->ndigits != curve->g.ndigits))
++		return -EINVAL;
++
+ 	/* Check 1: Verify key is not the zero point. */
+ 	if (ecc_point_is_zero(pk))
+ 		return -EINVAL;
+@@ -1054,8 +1449,8 @@
+ 		return -EINVAL;
+
+ 	return 0;
+-
+ }
++EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
+
+ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ 			      const u64 *private_key, const u64 *public_key,
+@@ -1111,3 +1506,6 @@
+ out:
+ 	return ret;
+ }
++EXPORT_SYMBOL(crypto_ecdh_shared_secret);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- linux-4.19.82/crypto/ecc.h	2019-11-06 12:06:31.000000000 +0000
++++ linux-4.19.82/crypto/ecc.h	2022-04-06 23:46:51.136579712 +0100
+@@ -26,13 +26,51 @@
+ #ifndef _CRYPTO_ECC_H
+ #define _CRYPTO_ECC_H
+
++/* One digit is u64 qword. */
+ #define ECC_CURVE_NIST_P192_DIGITS	3
+ #define ECC_CURVE_NIST_P256_DIGITS	4
+-#define ECC_MAX_DIGITS	ECC_CURVE_NIST_P256_DIGITS
++#define ECC_MAX_DIGITS	(512 / 64)
+
+ #define ECC_DIGITS_TO_BYTES_SHIFT 3
+
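(Illustrative aside, not part of the patch: __ecc_is_key_valid() above enforces the FIPS 186-4 private-key range [2, n-3] with two multi-word comparisons — reject key <= 1, then reject key >= n - 2. The same bounds check collapsed to one machine word:)

#include <assert.h>
#include <stdint.h>

/* Accept scalars only in [2, n-3], the way __ecc_is_key_valid() does,
 * here for a single-word group order n (illustrative only). */
static int key_in_range(uint64_t key, uint64_t n)
{
	uint64_t one = 1;

	if (!(one < key))		/* vli_cmp(one, key) != -1: key <= 1 */
		return 0;
	if (!(key < n - one - one))	/* vli_cmp(n-2, key) != 1: key >= n-2 */
		return 0;
	return 1;
}

int main(void)
{
	uint64_t n = 1000003;	/* stand-in group order */

	assert(!key_in_range(0, n) && !key_in_range(1, n));
	assert(key_in_range(2, n) && key_in_range(n - 3, n));
	assert(!key_in_range(n - 2, n) && !key_in_range(n - 1, n));
	return 0;
}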
+ /**
++ * struct ecc_point - elliptic curve point in affine coordinates
++ *
++ * @x:		X coordinate in vli form.
++ * @y:		Y coordinate in vli form.
++ * @ndigits:	Length of vlis in u64 qwords.
++ */
++struct ecc_point {
++	u64 *x;
++	u64 *y;
++	u8 ndigits;
++};
++
++#define ECC_POINT_INIT(x, y, ndigits)	(struct ecc_point) { x, y, ndigits }
++
++/**
++ * struct ecc_curve - definition of elliptic curve
++ *
++ * @name:	Short name of the curve.
++ * @g:		Generator point of the curve.
++ * @p:		Prime number; if Barrett's reduction is used for this curve,
++ *		the pre-calculated value 'mu' is appended to @p after ndigits.
++ *		Use of Barrett's reduction is heuristically determined in
++ *		vli_mmod_fast().
++ * @n:		Order of the curve group.
++ * @a:		Curve parameter a.
++ * @b:		Curve parameter b.
++ */
++struct ecc_curve {
++	char *name;
++	struct ecc_point g;
++	u64 *p;
++	u64 *n;
++	u64 *a;
++	u64 *b;
++};
++
++/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id:		id representing the curve to use
+@@ -91,4 +129,117 @@
+ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ 			      const u64 *private_key, const u64 *public_key,
+ 			      u64 *secret);
++
++/**
++ * ecc_is_pubkey_valid_partial() - Partial public key validation
++ *
++ * @curve:		elliptic curve domain parameters
++ * @pk:			public key as a point
++ *
++ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
++ * Public-Key Validation Routine.
++ *
++ * Note: There is no check that the public key is in the correct elliptic curve
++ * subgroup.
++ *
++ * Return: 0 if validation is successful, -EINVAL if validation failed.
++ */
++int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
++				struct ecc_point *pk);
++
++/**
++ * vli_is_zero() - Determine if vli is zero
++ *
++ * @vli:		vli to check.
++ * @ndigits:		length of the @vli
++ */
++bool vli_is_zero(const u64 *vli, unsigned int ndigits);
++
++/**
++ * vli_cmp() - compare left and right vlis
++ *
++ * @left:		vli
++ * @right:		vli
++ * @ndigits:		length of both vlis
++ *
++ * Returns sign of @left - @right, i.e. -1 if @left < @right,
++ * 0 if @left == @right, 1 if @left > @right.
++ */
++int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
++
++/**
++ * vli_sub() - Subtracts right from left
++ *
++ * @result:		where to write result
++ * @left:		vli
++ * @right:		vli
++ * @ndigits:		length of all vlis
++ *
++ * Note: can modify in-place.
++ *
++ * Return: borrow bit.
++ */
++u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
++	    unsigned int ndigits);
++
++/**
++ * vli_from_be64() - Load vli from big-endian u64 array
++ *
++ * @dest:		destination vli
++ * @src:		source array of u64 BE values
++ * @ndigits:		length of both vli and array
++ */
++void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
++
++/**
++ * vli_from_le64() - Load vli from little-endian u64 array
++ *
++ * @dest:		destination vli
++ * @src:		source array of u64 LE values
++ * @ndigits:		length of both vli and array
++ */
++void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
++
++/**
++ * vli_mod_inv() - Modular inversion
++ *
++ * @result:		where to write vli number
++ * @input:		vli value to operate on
++ * @mod:		modulus
++ * @ndigits:		length of all vlis
++ */
++void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
++		 unsigned int ndigits);
++
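(Illustrative aside, not part of the patch: the prototypes above are the surface that in-tree consumers such as the EC-RDSA verifier build on — parse big-endian wire data with vli_from_be64(), then range-check it with vli_cmp(). A hypothetical snippet of such consumer code; load_sig_half() and its policy are made up for illustration, and only the vli_* calls declared in this header are real:)

#include <linux/module.h>
#include "ecc.h"

/* Hypothetical helper: accept a big-endian signature half only if it
 * lies in [1, n-1], using the vli API exported by this patch. */
static int load_sig_half(u64 *out, const void *be_data,
			 const u64 *n, unsigned int ndigits)
{
	vli_from_be64(out, be_data, ndigits);

	if (vli_is_zero(out, ndigits))		/* reject 0 */
		return -EINVAL;
	if (vli_cmp(out, n, ndigits) >= 0)	/* reject values >= n */
		return -EINVAL;
	return 0;
}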
++/**
++ * vli_mod_mult_slow() - Modular multiplication
++ *
++ * @result:		where to write result value
++ * @left:		vli number to multiply with @right
++ * @right:		vli number to multiply with @left
++ * @mod:		modulus
++ * @ndigits:		length of all vlis
++ *
++ * Note: Assumes that mod is a sufficiently large curve order.
++ */
++void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
++		       const u64 *mod, unsigned int ndigits);
++
++/**
++ * ecc_point_mult_shamir() - Add two points multiplied by scalars
++ *
++ * @result:		resulting point
++ * @x:			scalar to multiply with @p
++ * @p:			point to multiply with @x
++ * @y:			scalar to multiply with @q
++ * @q:			point to multiply with @y
++ * @curve:		curve
++ *
++ * Returns result = x * p + y * q over the curve.
++ * This works faster than two multiplications and addition.
++ */
++void ecc_point_mult_shamir(const struct ecc_point *result,
++			   const u64 *x, const struct ecc_point *p,
++			   const u64 *y, const struct ecc_point *q,
++			   const struct ecc_curve *curve);
+ #endif
+--- linux-4.19.82/crypto/ecc_curve_defs.h	2019-11-06 12:06:31.000000000 +0000
++++ linux-4.19.82/crypto/ecc_curve_defs.h	2022-04-06 23:47:41.973116885 +0100
+@@ -2,21 +2,6 @@
+ #ifndef _CRYTO_ECC_CURVE_DEFS_H
+ #define _CRYTO_ECC_CURVE_DEFS_H
+
+-struct ecc_point {
+-	u64 *x;
+-	u64 *y;
+-	u8 ndigits;
+-};
+-
+-struct ecc_curve {
+-	char *name;
+-	struct ecc_point g;
+-	u64 *p;
+-	u64 *n;
+-	u64 *a;
+-	u64 *b;
+-};
+-
+ /* NIST P-192: a = p - 3 */
+ static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
+ 			       0x188DA80EB03090F6ull };