Fix RPM build (reverting CI changes that will need to be un-reverted or made conditional) and vendor Rust dependencies to make builds much faster in any CI system.

Adam Ierymenko
2022-06-08 07:32:16 -04:00
parent 373ca30269
commit d5ca4e5f52
12611 changed files with 2898014 additions and 284 deletions
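
For context on the vendoring half of this commit: the standard workflow is `cargo vendor`, which copies every dependency's source into a local directory and prints a config snippet to commit alongside it. A minimal sketch, assuming the config lives at zeroidc/.cargo/config.toml next to the vendor/ tree as laid out here:

# .cargo/config.toml (output of `cargo vendor`; exact path assumed)
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"

With that in place, cargo resolves crates from the checked-in vendor/ tree instead of the network, which is what makes CI builds faster and reproducible.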

zeroidc/vendor/sha2/src/consts.rs vendored Normal file

@@ -0,0 +1,107 @@
#![allow(dead_code, clippy::unreadable_literal)]
pub const STATE_LEN: usize = 8;
pub const BLOCK_LEN: usize = 16; // block length in words (32-bit words for SHA-256, 64-bit words for SHA-512)
pub type State256 = [u32; STATE_LEN];
pub type State512 = [u64; STATE_LEN];
/// Constants necessary for SHA-256 family of digests.
pub const K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
];
/// Constants necessary for SHA-256 family of digests.
pub const K32X4: [[u32; 4]; 16] = [
[K32[3], K32[2], K32[1], K32[0]],
[K32[7], K32[6], K32[5], K32[4]],
[K32[11], K32[10], K32[9], K32[8]],
[K32[15], K32[14], K32[13], K32[12]],
[K32[19], K32[18], K32[17], K32[16]],
[K32[23], K32[22], K32[21], K32[20]],
[K32[27], K32[26], K32[25], K32[24]],
[K32[31], K32[30], K32[29], K32[28]],
[K32[35], K32[34], K32[33], K32[32]],
[K32[39], K32[38], K32[37], K32[36]],
[K32[43], K32[42], K32[41], K32[40]],
[K32[47], K32[46], K32[45], K32[44]],
[K32[51], K32[50], K32[49], K32[48]],
[K32[55], K32[54], K32[53], K32[52]],
[K32[59], K32[58], K32[57], K32[56]],
[K32[63], K32[62], K32[61], K32[60]],
];
/// Constants necessary for SHA-512 family of digests.
pub const K64: [u64; 80] = [
0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc,
0x3956c25bf348b538, 0x59f111f1b605d019, 0x923f82a4af194f9b, 0xab1c5ed5da6d8118,
0xd807aa98a3030242, 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235, 0xc19bf174cf692694,
0xe49b69c19ef14ad2, 0xefbe4786384f25e3, 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65,
0x2de92c6f592b0275, 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f, 0xbf597fc7beef0ee4,
0xc6e00bf33da88fc2, 0xd5a79147930aa725, 0x06ca6351e003826f, 0x142929670a0e6e70,
0x27b70a8546d22ffc, 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6, 0x92722c851482353b,
0xa2bfe8a14cf10364, 0xa81a664bbc423001, 0xc24b8b70d0f89791, 0xc76c51a30654be30,
0xd192e819d6ef5218, 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8,
0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb, 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3,
0x748f82ee5defb2fc, 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915, 0xc67178f2e372532b,
0xca273eceea26619c, 0xd186b8c721c0c207, 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178,
0x06f067aa72176fba, 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c,
0x4cc5d4becb3e42b6, 0x597f299cfc657e2a, 0x5fcb6fab3ad6faec, 0x6c44198c4a475817,
];
/// Constants necessary for SHA-512 family of digests.
pub const K64X2: [[u64; 2]; 40] = [
[K64[1], K64[0]], [K64[3], K64[2]], [K64[5], K64[4]], [K64[7], K64[6]],
[K64[9], K64[8]], [K64[11], K64[10]], [K64[13], K64[12]], [K64[15], K64[14]],
[K64[17], K64[16]], [K64[19], K64[18]], [K64[21], K64[20]], [K64[23], K64[22]],
[K64[25], K64[24]], [K64[27], K64[26]], [K64[29], K64[28]], [K64[31], K64[30]],
[K64[33], K64[32]], [K64[35], K64[34]], [K64[37], K64[36]], [K64[39], K64[38]],
[K64[41], K64[40]], [K64[43], K64[42]], [K64[45], K64[44]], [K64[47], K64[46]],
[K64[49], K64[48]], [K64[51], K64[50]], [K64[53], K64[52]], [K64[55], K64[54]],
[K64[57], K64[56]], [K64[59], K64[58]], [K64[61], K64[60]], [K64[63], K64[62]],
[K64[65], K64[64]], [K64[67], K64[66]], [K64[69], K64[68]], [K64[71], K64[70]],
[K64[73], K64[72]], [K64[75], K64[74]], [K64[77], K64[76]], [K64[79], K64[78]],
];
pub const H256_224: State256 = [
0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
];
pub const H256_256: State256 = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
pub const H512_224: State512 = [
0x8c3d37c819544da2, 0x73e1996689dcd4d6, 0x1dfab7ae32ff9c82, 0x679dd514582f9fcf,
0x0f6d2b697bd44da8, 0x77e36f7304c48942, 0x3f9d85a86a1d36c8, 0x1112e6ad91d692a1,
];
pub const H512_256: State512 = [
0x22312194fc2bf72c, 0x9f555fa3c84c64c2, 0x2393b86b6f53b151, 0x963877195940eabd,
0x96283ee2a88effe3, 0xbe5e1e2553863992, 0x2b0199fc2c85b8aa, 0x0eb72ddc81c52ca2,
];
pub const H512_384: State512 = [
0xcbbb9d5dc1059ed8, 0x629a292a367cd507, 0x9159015a3070dd17, 0x152fecd8f70e5939,
0x67332667ffc00b31, 0x8eb44a8768581511, 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4,
];
pub const H512_512: State512 = [
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
];
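
A note on the layout above: K32X4 and K64X2 store each group of round constants word-reversed, because both the soft backend (which builds its message vectors as [w3, w2, w1, w0]) and the x86 backend (where `_mm_set_epi32` takes its highest lane first) consume them in that order. A hedged sketch of the invariant, written against borrowed tables since this consts module is private:

// Each K32X4[i] holds K32[4*i..4*i+4] with the four words reversed.
fn check_k32x4(k32: &[u32; 64], k32x4: &[[u32; 4]; 16]) {
    for (i, quad) in k32x4.iter().enumerate() {
        let mut expected = [k32[4 * i], k32[4 * i + 1], k32[4 * i + 2], k32[4 * i + 3]];
        expected.reverse();
        assert_eq!(*quad, expected);
    }
}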

zeroidc/vendor/sha2/src/core_api.rs vendored Normal file

@@ -0,0 +1,157 @@
use crate::{consts, sha256::compress256, sha512::compress512};
use core::{fmt, slice::from_ref};
use digest::{
block_buffer::Eager,
core_api::{
AlgorithmName, Block, BlockSizeUser, Buffer, BufferKindUser, OutputSizeUser, TruncSide,
UpdateCore, VariableOutputCore,
},
typenum::{Unsigned, U128, U32, U64},
HashMarker, InvalidOutputSize, Output,
};
/// Core block-level SHA-256 hasher with variable output size.
///
/// Supports initialization only for 28 and 32 byte output sizes,
/// i.e. 224 and 256 bits respectively.
#[derive(Clone)]
pub struct Sha256VarCore {
state: consts::State256,
block_len: u64,
}
impl HashMarker for Sha256VarCore {}
impl BlockSizeUser for Sha256VarCore {
type BlockSize = U64;
}
impl BufferKindUser for Sha256VarCore {
type BufferKind = Eager;
}
impl UpdateCore for Sha256VarCore {
#[inline]
fn update_blocks(&mut self, blocks: &[Block<Self>]) {
self.block_len += blocks.len() as u64;
compress256(&mut self.state, blocks);
}
}
impl OutputSizeUser for Sha256VarCore {
type OutputSize = U32;
}
impl VariableOutputCore for Sha256VarCore {
const TRUNC_SIDE: TruncSide = TruncSide::Left;
#[inline]
fn new(output_size: usize) -> Result<Self, InvalidOutputSize> {
let state = match output_size {
28 => consts::H256_224,
32 => consts::H256_256,
_ => return Err(InvalidOutputSize),
};
let block_len = 0;
Ok(Self { state, block_len })
}
#[inline]
fn finalize_variable_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>) {
let bs = Self::BlockSize::U64;
let bit_len = 8 * (buffer.get_pos() as u64 + bs * self.block_len);
buffer.len64_padding_be(bit_len, |b| compress256(&mut self.state, from_ref(b)));
for (chunk, v) in out.chunks_exact_mut(4).zip(self.state.iter()) {
chunk.copy_from_slice(&v.to_be_bytes());
}
}
}
impl AlgorithmName for Sha256VarCore {
#[inline]
fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Sha256")
}
}
impl fmt::Debug for Sha256VarCore {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Sha256VarCore { ... }")
}
}
/// Core block-level SHA-512 hasher with variable output size.
///
/// Supports initialization only for 28, 32, 48, and 64 byte output sizes,
/// i.e. 224, 256, 384, and 512 bits respectively.
#[derive(Clone)]
pub struct Sha512VarCore {
state: consts::State512,
block_len: u128,
}
impl HashMarker for Sha512VarCore {}
impl BlockSizeUser for Sha512VarCore {
type BlockSize = U128;
}
impl BufferKindUser for Sha512VarCore {
type BufferKind = Eager;
}
impl UpdateCore for Sha512VarCore {
#[inline]
fn update_blocks(&mut self, blocks: &[Block<Self>]) {
self.block_len += blocks.len() as u128;
compress512(&mut self.state, blocks);
}
}
impl OutputSizeUser for Sha512VarCore {
type OutputSize = U64;
}
impl VariableOutputCore for Sha512VarCore {
const TRUNC_SIDE: TruncSide = TruncSide::Left;
#[inline]
fn new(output_size: usize) -> Result<Self, InvalidOutputSize> {
let state = match output_size {
28 => consts::H512_224,
32 => consts::H512_256,
48 => consts::H512_384,
64 => consts::H512_512,
_ => return Err(InvalidOutputSize),
};
let block_len = 0;
Ok(Self { state, block_len })
}
#[inline]
fn finalize_variable_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>) {
let bs = Self::BlockSize::U64 as u128;
let bit_len = 8 * (buffer.get_pos() as u128 + bs * self.block_len);
buffer.len128_padding_be(bit_len, |b| compress512(&mut self.state, from_ref(b)));
for (chunk, v) in out.chunks_exact_mut(8).zip(self.state.iter()) {
chunk.copy_from_slice(&v.to_be_bytes());
}
}
}
impl AlgorithmName for Sha512VarCore {
#[inline]
fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Sha512")
}
}
impl fmt::Debug for Sha512VarCore {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Sha512VarCore { ... }")
}
}
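
The `bit_len` computed in each `finalize_variable_core` above is the total message length in bits: the bytes still sitting in the buffer plus `block_len` already-compressed blocks, times 8; it is what `len64_padding_be`/`len128_padding_be` append to the final padded block. The same arithmetic as a standalone sketch for SHA-256's 64-byte blocks:

// After hashing 100 bytes: one full block was compressed (block_len = 1)
// and 36 bytes remain buffered (get_pos() = 36).
let (bs, block_len, pos) = (64u64, 1u64, 36u64);
let bit_len = 8 * (pos + bs * block_len);
assert_eq!(bit_len, 800); // 100 bytes * 8 bits, encoded into the padding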

zeroidc/vendor/sha2/src/lib.rs vendored Normal file

@@ -0,0 +1,86 @@
//! An implementation of the [SHA-2][1] cryptographic hash algorithms.
//!
//! There are 6 standard algorithms specified in the SHA-2 standard: [`Sha224`],
//! [`Sha256`], [`Sha512_224`], [`Sha512_256`], [`Sha384`], and [`Sha512`].
//!
//! Algorithmically, there are only 2 core algorithms: SHA-256 and SHA-512.
//! All other algorithms are just applications of these with different initial
hash values, and truncated to different digest bit lengths. The first two
algorithms in the list are based on SHA-256, while the last four are based on SHA-512.
//!
//! # Usage
//!
//! ```rust
//! use hex_literal::hex;
//! use sha2::{Sha256, Sha512, Digest};
//!
//! // create a Sha256 object
//! let mut hasher = Sha256::new();
//!
//! // write input message
//! hasher.update(b"hello world");
//!
//! // read hash digest and consume hasher
//! let result = hasher.finalize();
//!
//! assert_eq!(result[..], hex!("
//! b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9
//! ")[..]);
//!
//! // same for Sha512
//! let mut hasher = Sha512::new();
//! hasher.update(b"hello world");
//! let result = hasher.finalize();
//!
//! assert_eq!(result[..], hex!("
//! 309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f
//! 989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f
//! ")[..]);
//! ```
//!
//! Also see [RustCrypto/hashes][2] readme.
//!
//! [1]: https://en.wikipedia.org/wiki/SHA-2
//! [2]: https://github.com/RustCrypto/hashes
#![no_std]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
html_root_url = "https://docs.rs/sha2/0.10.2"
)]
#![warn(missing_docs, rust_2018_idioms)]
pub use digest::{self, Digest};
use digest::{
consts::{U28, U32, U48, U64},
core_api::{CoreWrapper, CtVariableCoreWrapper},
};
#[rustfmt::skip]
mod consts;
mod core_api;
mod sha256;
mod sha512;
#[cfg(feature = "compress")]
pub use sha256::compress256;
#[cfg(feature = "compress")]
pub use sha512::compress512;
pub use core_api::{Sha256VarCore, Sha512VarCore};
/// SHA-224 hasher.
pub type Sha224 = CoreWrapper<CtVariableCoreWrapper<Sha256VarCore, U28>>;
/// SHA-256 hasher.
pub type Sha256 = CoreWrapper<CtVariableCoreWrapper<Sha256VarCore, U32>>;
/// SHA-512/224 hasher.
pub type Sha512_224 = CoreWrapper<CtVariableCoreWrapper<Sha512VarCore, U28>>;
/// SHA-512/256 hasher.
pub type Sha512_256 = CoreWrapper<CtVariableCoreWrapper<Sha512VarCore, U32>>;
/// SHA-384 hasher.
pub type Sha384 = CoreWrapper<CtVariableCoreWrapper<Sha512VarCore, U48>>;
/// SHA-512 hasher.
pub type Sha512 = CoreWrapper<CtVariableCoreWrapper<Sha512VarCore, U64>>;
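
The truncated variants defined above go through the same `Digest` interface as the usage example in the module docs; a brief sketch (digest values elided rather than invented):

use sha2::{Digest, Sha224, Sha512_256};

let mut hasher = Sha224::new();
hasher.update(b"hello world");
let d224 = hasher.finalize();
assert_eq!(d224.len(), 28); // 224 bits

// One-shot convenience method from the Digest trait:
let d512_256 = Sha512_256::digest(b"hello world");
assert_eq!(d512_256.len(), 32); // 256 bits, computed with the SHA-512 core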

zeroidc/vendor/sha2/src/sha256.rs vendored Normal file

@@ -0,0 +1,37 @@
use digest::{generic_array::GenericArray, typenum::U64};
cfg_if::cfg_if! {
if #[cfg(feature = "force-soft")] {
mod soft;
use soft::compress;
} else if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
#[cfg(not(feature = "asm"))]
mod soft;
#[cfg(feature = "asm")]
mod soft {
pub(crate) use sha2_asm::compress256 as compress;
}
mod x86;
use x86::compress;
} else if #[cfg(all(feature = "asm", target_arch = "aarch64"))] {
mod soft;
mod aarch64;
use aarch64::compress;
} else {
mod soft;
use soft::compress;
}
}
/// Raw SHA-256 compression function.
///
/// This is a low-level "hazmat" API which provides direct access to the core
/// functionality of SHA-256.
#[cfg_attr(docsrs, doc(cfg(feature = "compress")))]
pub fn compress256(state: &mut [u32; 8], blocks: &[GenericArray<u8, U64>]) {
// SAFETY: GenericArray<u8, U64> and [u8; 64] have
// exactly the same memory layout
let p = blocks.as_ptr() as *const [u8; 64];
let blocks = unsafe { core::slice::from_raw_parts(p, blocks.len()) };
compress(state, blocks)
}
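
With the crate's `compress` feature enabled, `compress256` can also be driven directly. A minimal hazmat sketch, seeding the state by hand with the SHA-256 initial values (the same words as `consts::H256_256`, which is private):

use digest::{generic_array::GenericArray, typenum::U64};
use sha2::compress256; // requires feature = "compress"

let mut state: [u32; 8] = [
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
let block: GenericArray<u8, U64> = GenericArray::default(); // one all-zero block
compress256(&mut state, core::slice::from_ref(&block));
// `state` now holds the raw compression output. This is NOT a SHA-256
// digest of anything useful by itself: no length padding was applied.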

zeroidc/vendor/sha2/src/sha256/aarch64.rs vendored Normal file

@@ -0,0 +1,15 @@
//! SHA-256 `aarch64` backend.
// TODO: stdarch intrinsics: RustCrypto/hashes#257
cpufeatures::new!(sha2_hwcap, "sha2");
pub fn compress(state: &mut [u32; 8], blocks: &[[u8; 64]]) {
// TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
// after stabilization
if sha2_hwcap::get() {
sha2_asm::compress256(state, blocks);
} else {
super::soft::compress(state, blocks);
}
}
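
This probe-once-then-branch shape, courtesy of `cpufeatures::new!`, is the same dispatch used by the x86 backends below. A hedged sketch of the general pattern, with hypothetical names (`aes_hwcap` and the two paths are illustration only):

cpufeatures::new!(aes_hwcap, "aes"); // expands to a module with init()/get()

fn dispatch() {
    // get() returns a cached probe result, so after the first call this is
    // an atomic load rather than a fresh CPUID/auxval query.
    if aes_hwcap::get() {
        // hardware_path();
    } else {
        // software_fallback();
    }
}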

zeroidc/vendor/sha2/src/sha256/soft.rs vendored Normal file

@@ -0,0 +1,218 @@
#![allow(clippy::many_single_char_names)]
use crate::consts::BLOCK_LEN;
use core::convert::TryInto;
#[inline(always)]
fn shl(v: [u32; 4], o: u32) -> [u32; 4] {
[v[0] >> o, v[1] >> o, v[2] >> o, v[3] >> o]
}
#[inline(always)]
fn shr(v: [u32; 4], o: u32) -> [u32; 4] {
[v[0] << o, v[1] << o, v[2] << o, v[3] << o]
}
#[inline(always)]
fn or(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
[a[0] | b[0], a[1] | b[1], a[2] | b[2], a[3] | b[3]]
}
#[inline(always)]
fn xor(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
[a[0] ^ b[0], a[1] ^ b[1], a[2] ^ b[2], a[3] ^ b[3]]
}
#[inline(always)]
fn add(a: [u32; 4], b: [u32; 4]) -> [u32; 4] {
[
a[0].wrapping_add(b[0]),
a[1].wrapping_add(b[1]),
a[2].wrapping_add(b[2]),
a[3].wrapping_add(b[3]),
]
}
fn sha256load(v2: [u32; 4], v3: [u32; 4]) -> [u32; 4] {
[v3[3], v2[0], v2[1], v2[2]]
}
fn sha256swap(v0: [u32; 4]) -> [u32; 4] {
[v0[2], v0[3], v0[0], v0[1]]
}
fn sha256msg1(v0: [u32; 4], v1: [u32; 4]) -> [u32; 4] {
// sigma 0 on vectors
#[inline]
fn sigma0x4(x: [u32; 4]) -> [u32; 4] {
let t1 = or(shl(x, 7), shr(x, 25));
let t2 = or(shl(x, 18), shr(x, 14));
let t3 = shl(x, 3);
xor(xor(t1, t2), t3)
}
add(v0, sigma0x4(sha256load(v0, v1)))
}
fn sha256msg2(v4: [u32; 4], v3: [u32; 4]) -> [u32; 4] {
macro_rules! sigma1 {
($a:expr) => {
$a.rotate_right(17) ^ $a.rotate_right(19) ^ ($a >> 10)
};
}
let [x3, x2, x1, x0] = v4;
let [w15, w14, _, _] = v3;
let w16 = x0.wrapping_add(sigma1!(w14));
let w17 = x1.wrapping_add(sigma1!(w15));
let w18 = x2.wrapping_add(sigma1!(w16));
let w19 = x3.wrapping_add(sigma1!(w17));
[w19, w18, w17, w16]
}
fn sha256_digest_round_x2(cdgh: [u32; 4], abef: [u32; 4], wk: [u32; 4]) -> [u32; 4] {
macro_rules! big_sigma0 {
($a:expr) => {
($a.rotate_right(2) ^ $a.rotate_right(13) ^ $a.rotate_right(22))
};
}
macro_rules! big_sigma1 {
($a:expr) => {
($a.rotate_right(6) ^ $a.rotate_right(11) ^ $a.rotate_right(25))
};
}
macro_rules! bool3ary_202 {
($a:expr, $b:expr, $c:expr) => {
$c ^ ($a & ($b ^ $c))
};
} // Choose, MD5F, SHA1C
macro_rules! bool3ary_232 {
($a:expr, $b:expr, $c:expr) => {
($a & $b) ^ ($a & $c) ^ ($b & $c)
};
} // Majority, SHA1M
let [_, _, wk1, wk0] = wk;
let [a0, b0, e0, f0] = abef;
let [c0, d0, g0, h0] = cdgh;
// a round
let x0 = big_sigma1!(e0)
.wrapping_add(bool3ary_202!(e0, f0, g0))
.wrapping_add(wk0)
.wrapping_add(h0);
let y0 = big_sigma0!(a0).wrapping_add(bool3ary_232!(a0, b0, c0));
let (a1, b1, c1, d1, e1, f1, g1, h1) = (
x0.wrapping_add(y0),
a0,
b0,
c0,
x0.wrapping_add(d0),
e0,
f0,
g0,
);
// a round
let x1 = big_sigma1!(e1)
.wrapping_add(bool3ary_202!(e1, f1, g1))
.wrapping_add(wk1)
.wrapping_add(h1);
let y1 = big_sigma0!(a1).wrapping_add(bool3ary_232!(a1, b1, c1));
let (a2, b2, _, _, e2, f2, _, _) = (
x1.wrapping_add(y1),
a1,
b1,
c1,
x1.wrapping_add(d1),
e1,
f1,
g1,
);
[a2, b2, e2, f2]
}
fn schedule(v0: [u32; 4], v1: [u32; 4], v2: [u32; 4], v3: [u32; 4]) -> [u32; 4] {
let t1 = sha256msg1(v0, v1);
let t2 = sha256load(v2, v3);
let t3 = add(t1, t2);
sha256msg2(t3, v3)
}
macro_rules! rounds4 {
($abef:ident, $cdgh:ident, $rest:expr, $i:expr) => {{
let t1 = add($rest, crate::consts::K32X4[$i]);
$cdgh = sha256_digest_round_x2($cdgh, $abef, t1);
let t2 = sha256swap(t1);
$abef = sha256_digest_round_x2($abef, $cdgh, t2);
}};
}
macro_rules! schedule_rounds4 {
(
$abef:ident, $cdgh:ident,
$w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
$i: expr
) => {{
$w4 = schedule($w0, $w1, $w2, $w3);
rounds4!($abef, $cdgh, $w4, $i);
}};
}
/// Process a block with the SHA-256 algorithm.
fn sha256_digest_block_u32(state: &mut [u32; 8], block: &[u32; 16]) {
let mut abef = [state[0], state[1], state[4], state[5]];
let mut cdgh = [state[2], state[3], state[6], state[7]];
// Rounds 0..64
let mut w0 = [block[3], block[2], block[1], block[0]];
let mut w1 = [block[7], block[6], block[5], block[4]];
let mut w2 = [block[11], block[10], block[9], block[8]];
let mut w3 = [block[15], block[14], block[13], block[12]];
let mut w4;
rounds4!(abef, cdgh, w0, 0);
rounds4!(abef, cdgh, w1, 1);
rounds4!(abef, cdgh, w2, 2);
rounds4!(abef, cdgh, w3, 3);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 4);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 5);
schedule_rounds4!(abef, cdgh, w2, w3, w4, w0, w1, 6);
schedule_rounds4!(abef, cdgh, w3, w4, w0, w1, w2, 7);
schedule_rounds4!(abef, cdgh, w4, w0, w1, w2, w3, 8);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 9);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 10);
schedule_rounds4!(abef, cdgh, w2, w3, w4, w0, w1, 11);
schedule_rounds4!(abef, cdgh, w3, w4, w0, w1, w2, 12);
schedule_rounds4!(abef, cdgh, w4, w0, w1, w2, w3, 13);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 14);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 15);
let [a, b, e, f] = abef;
let [c, d, g, h] = cdgh;
state[0] = state[0].wrapping_add(a);
state[1] = state[1].wrapping_add(b);
state[2] = state[2].wrapping_add(c);
state[3] = state[3].wrapping_add(d);
state[4] = state[4].wrapping_add(e);
state[5] = state[5].wrapping_add(f);
state[6] = state[6].wrapping_add(g);
state[7] = state[7].wrapping_add(h);
}
pub fn compress(state: &mut [u32; 8], blocks: &[[u8; 64]]) {
let mut block_u32 = [0u32; BLOCK_LEN];
// since LLVM can't properly use aliasing yet it will make
// unnecessary state stores without this copy
let mut state_cpy = *state;
for block in blocks {
for (o, chunk) in block_u32.iter_mut().zip(block.chunks_exact(4)) {
*o = u32::from_be_bytes(chunk.try_into().unwrap());
}
sha256_digest_block_u32(&mut state_cpy, &block_u32);
}
*state = state_cpy;
}
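
A readability trap in the file above: `shl` is implemented with `>>` and `shr` with `<<`, i.e. the helper names are swapped relative to the Rust operators. `sigma0x4` still computes the right thing because it ORs a right shift by n with a left shift by 32 - n, which is exactly a 32-bit rotation. The identity, as a small sketch:

// rotate_right(n) == (x >> n) | (x << (32 - n)) for 0 < n < 32
fn rotr_via_shifts(x: u32, n: u32) -> u32 {
    (x >> n) | (x << (32 - n))
}

let x = 0x428a2f98u32;
assert_eq!(rotr_via_shifts(x, 7), x.rotate_right(7));   // sigma0's rotr 7
assert_eq!(rotr_via_shifts(x, 18), x.rotate_right(18)); // sigma0's rotr 18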

zeroidc/vendor/sha2/src/sha256/x86.rs vendored Normal file

@@ -0,0 +1,112 @@
//! SHA-256 `x86`/`x86_64` backend
#![allow(clippy::many_single_char_names)]
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
unsafe fn schedule(v0: __m128i, v1: __m128i, v2: __m128i, v3: __m128i) -> __m128i {
let t1 = _mm_sha256msg1_epu32(v0, v1);
let t2 = _mm_alignr_epi8(v3, v2, 4);
let t3 = _mm_add_epi32(t1, t2);
_mm_sha256msg2_epu32(t3, v3)
}
macro_rules! rounds4 {
($abef:ident, $cdgh:ident, $rest:expr, $i:expr) => {{
let k = crate::consts::K32X4[$i];
let kv = _mm_set_epi32(k[0] as i32, k[1] as i32, k[2] as i32, k[3] as i32);
let t1 = _mm_add_epi32($rest, kv);
$cdgh = _mm_sha256rnds2_epu32($cdgh, $abef, t1);
let t2 = _mm_shuffle_epi32(t1, 0x0E);
$abef = _mm_sha256rnds2_epu32($abef, $cdgh, t2);
}};
}
macro_rules! schedule_rounds4 {
(
$abef:ident, $cdgh:ident,
$w0:expr, $w1:expr, $w2:expr, $w3:expr, $w4:expr,
$i: expr
) => {{
$w4 = schedule($w0, $w1, $w2, $w3);
rounds4!($abef, $cdgh, $w4, $i);
}};
}
// we use unaligned loads with `__m128i` pointers
#[allow(clippy::cast_ptr_alignment)]
#[target_feature(enable = "sha,sse2,ssse3,sse4.1")]
unsafe fn digest_blocks(state: &mut [u32; 8], blocks: &[[u8; 64]]) {
#[allow(non_snake_case)]
let MASK: __m128i = _mm_set_epi64x(
0x0C0D_0E0F_0809_0A0Bu64 as i64,
0x0405_0607_0001_0203u64 as i64,
);
let state_ptr = state.as_ptr() as *const __m128i;
let dcba = _mm_loadu_si128(state_ptr.add(0));
let efgh = _mm_loadu_si128(state_ptr.add(1));
let cdab = _mm_shuffle_epi32(dcba, 0xB1);
let efgh = _mm_shuffle_epi32(efgh, 0x1B);
let mut abef = _mm_alignr_epi8(cdab, efgh, 8);
let mut cdgh = _mm_blend_epi16(efgh, cdab, 0xF0);
for block in blocks {
let abef_save = abef;
let cdgh_save = cdgh;
let data_ptr = block.as_ptr() as *const __m128i;
let mut w0 = _mm_shuffle_epi8(_mm_loadu_si128(data_ptr.add(0)), MASK);
let mut w1 = _mm_shuffle_epi8(_mm_loadu_si128(data_ptr.add(1)), MASK);
let mut w2 = _mm_shuffle_epi8(_mm_loadu_si128(data_ptr.add(2)), MASK);
let mut w3 = _mm_shuffle_epi8(_mm_loadu_si128(data_ptr.add(3)), MASK);
let mut w4;
rounds4!(abef, cdgh, w0, 0);
rounds4!(abef, cdgh, w1, 1);
rounds4!(abef, cdgh, w2, 2);
rounds4!(abef, cdgh, w3, 3);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 4);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 5);
schedule_rounds4!(abef, cdgh, w2, w3, w4, w0, w1, 6);
schedule_rounds4!(abef, cdgh, w3, w4, w0, w1, w2, 7);
schedule_rounds4!(abef, cdgh, w4, w0, w1, w2, w3, 8);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 9);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 10);
schedule_rounds4!(abef, cdgh, w2, w3, w4, w0, w1, 11);
schedule_rounds4!(abef, cdgh, w3, w4, w0, w1, w2, 12);
schedule_rounds4!(abef, cdgh, w4, w0, w1, w2, w3, 13);
schedule_rounds4!(abef, cdgh, w0, w1, w2, w3, w4, 14);
schedule_rounds4!(abef, cdgh, w1, w2, w3, w4, w0, 15);
abef = _mm_add_epi32(abef, abef_save);
cdgh = _mm_add_epi32(cdgh, cdgh_save);
}
let feba = _mm_shuffle_epi32(abef, 0x1B);
let dchg = _mm_shuffle_epi32(cdgh, 0xB1);
let dcba = _mm_blend_epi16(feba, dchg, 0xF0);
let hgef = _mm_alignr_epi8(dchg, feba, 8);
let state_ptr_mut = state.as_mut_ptr() as *mut __m128i;
_mm_storeu_si128(state_ptr_mut.add(0), dcba);
_mm_storeu_si128(state_ptr_mut.add(1), hgef);
}
cpufeatures::new!(shani_cpuid, "sha", "sse2", "ssse3", "sse4.1");
pub fn compress(state: &mut [u32; 8], blocks: &[[u8; 64]]) {
// TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
// after stabilization
if shani_cpuid::get() {
unsafe {
digest_blocks(state, blocks);
}
} else {
super::soft::compress(state, blocks);
}
}
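
The `MASK` shuffle in `digest_blocks` compensates for endianness: SHA-256 message words are big-endian, while `_mm_loadu_si128` reads bytes little-endian, so each 32-bit lane gets byte-swapped before the rounds (the `0xB1`/`0x1B` shuffles on the state serve an analogous lane-reordering role). The underlying byte-order difference, as a scalar sketch:

let bytes = [0x01u8, 0x02, 0x03, 0x04];
// SHA-256 interprets message bytes big-endian...
assert_eq!(u32::from_be_bytes(bytes), 0x0102_0304);
// ...but a plain x86 load yields the little-endian reading,
// hence the per-lane byte-swap shuffle before the rounds.
assert_eq!(u32::from_le_bytes(bytes), 0x0403_0201);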

zeroidc/vendor/sha2/src/sha512.rs vendored Normal file

@@ -0,0 +1,35 @@
use digest::{generic_array::GenericArray, typenum::U128};
cfg_if::cfg_if! {
if #[cfg(feature = "force-soft")] {
mod soft;
use soft::compress;
} else if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
#[cfg(not(feature = "asm"))]
mod soft;
#[cfg(feature = "asm")]
mod soft {
pub(crate) fn compress(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
sha2_asm::compress512(state, blocks);
}
}
mod x86;
use x86::compress;
} else {
mod soft;
use soft::compress;
}
}
/// Raw SHA-512 compression function.
///
/// This is a low-level "hazmat" API which provides direct access to the core
/// functionality of SHA-512.
#[cfg_attr(docsrs, doc(cfg(feature = "compress")))]
pub fn compress512(state: &mut [u64; 8], blocks: &[GenericArray<u8, U128>]) {
// SAFETY: GenericArray<u8, U128> and [u8; 128] have
// exactly the same memory layout
let p = blocks.as_ptr() as *const [u8; 128];
let blocks = unsafe { core::slice::from_raw_parts(p, blocks.len()) };
compress(state, blocks)
}

zeroidc/vendor/sha2/src/sha512/soft.rs vendored Normal file

@@ -0,0 +1,215 @@
#![allow(clippy::many_single_char_names)]
use crate::consts::{BLOCK_LEN, K64X2};
use core::convert::TryInto;
fn add(a: [u64; 2], b: [u64; 2]) -> [u64; 2] {
[a[0].wrapping_add(b[0]), a[1].wrapping_add(b[1])]
}
/// Not an intrinsic, but works like an unaligned load.
fn sha512load(v0: [u64; 2], v1: [u64; 2]) -> [u64; 2] {
[v1[1], v0[0]]
}
/// Performs 2 rounds of the SHA-512 message schedule update.
pub fn sha512_schedule_x2(v0: [u64; 2], v1: [u64; 2], v4to5: [u64; 2], v7: [u64; 2]) -> [u64; 2] {
// sigma 0
fn sigma0(x: u64) -> u64 {
((x << 63) | (x >> 1)) ^ ((x << 56) | (x >> 8)) ^ (x >> 7)
}
// sigma 1
fn sigma1(x: u64) -> u64 {
((x << 45) | (x >> 19)) ^ ((x << 3) | (x >> 61)) ^ (x >> 6)
}
let [w1, w0] = v0;
let [_, w2] = v1;
let [w10, w9] = v4to5;
let [w15, w14] = v7;
let w16 = sigma1(w14)
.wrapping_add(w9)
.wrapping_add(sigma0(w1))
.wrapping_add(w0);
let w17 = sigma1(w15)
.wrapping_add(w10)
.wrapping_add(sigma0(w2))
.wrapping_add(w1);
[w17, w16]
}
/// Performs one round of the SHA-512 message block digest.
pub fn sha512_digest_round(
ae: [u64; 2],
bf: [u64; 2],
cg: [u64; 2],
dh: [u64; 2],
wk0: u64,
) -> [u64; 2] {
macro_rules! big_sigma0 {
($a:expr) => {
($a.rotate_right(28) ^ $a.rotate_right(34) ^ $a.rotate_right(39))
};
}
macro_rules! big_sigma1 {
($a:expr) => {
($a.rotate_right(14) ^ $a.rotate_right(18) ^ $a.rotate_right(41))
};
}
macro_rules! bool3ary_202 {
($a:expr, $b:expr, $c:expr) => {
$c ^ ($a & ($b ^ $c))
};
} // Choose, MD5F, SHA1C
macro_rules! bool3ary_232 {
($a:expr, $b:expr, $c:expr) => {
($a & $b) ^ ($a & $c) ^ ($b & $c)
};
} // Majority, SHA1M
let [a0, e0] = ae;
let [b0, f0] = bf;
let [c0, g0] = cg;
let [d0, h0] = dh;
// a round
let x0 = big_sigma1!(e0)
.wrapping_add(bool3ary_202!(e0, f0, g0))
.wrapping_add(wk0)
.wrapping_add(h0);
let y0 = big_sigma0!(a0).wrapping_add(bool3ary_232!(a0, b0, c0));
let (a1, _, _, _, e1, _, _, _) = (
x0.wrapping_add(y0),
a0,
b0,
c0,
x0.wrapping_add(d0),
e0,
f0,
g0,
);
[a1, e1]
}
/// Process a block with the SHA-512 algorithm.
pub fn sha512_digest_block_u64(state: &mut [u64; 8], block: &[u64; 16]) {
let k = &K64X2;
macro_rules! schedule {
($v0:expr, $v1:expr, $v4:expr, $v5:expr, $v7:expr) => {
sha512_schedule_x2($v0, $v1, sha512load($v4, $v5), $v7)
};
}
macro_rules! rounds4 {
($ae:ident, $bf:ident, $cg:ident, $dh:ident, $wk0:expr, $wk1:expr) => {{
let [u, t] = $wk0;
let [w, v] = $wk1;
$dh = sha512_digest_round($ae, $bf, $cg, $dh, t);
$cg = sha512_digest_round($dh, $ae, $bf, $cg, u);
$bf = sha512_digest_round($cg, $dh, $ae, $bf, v);
$ae = sha512_digest_round($bf, $cg, $dh, $ae, w);
}};
}
let mut ae = [state[0], state[4]];
let mut bf = [state[1], state[5]];
let mut cg = [state[2], state[6]];
let mut dh = [state[3], state[7]];
// Rounds 0..20
let (mut w1, mut w0) = ([block[3], block[2]], [block[1], block[0]]);
rounds4!(ae, bf, cg, dh, add(k[0], w0), add(k[1], w1));
let (mut w3, mut w2) = ([block[7], block[6]], [block[5], block[4]]);
rounds4!(ae, bf, cg, dh, add(k[2], w2), add(k[3], w3));
let (mut w5, mut w4) = ([block[11], block[10]], [block[9], block[8]]);
rounds4!(ae, bf, cg, dh, add(k[4], w4), add(k[5], w5));
let (mut w7, mut w6) = ([block[15], block[14]], [block[13], block[12]]);
rounds4!(ae, bf, cg, dh, add(k[6], w6), add(k[7], w7));
let mut w8 = schedule!(w0, w1, w4, w5, w7);
let mut w9 = schedule!(w1, w2, w5, w6, w8);
rounds4!(ae, bf, cg, dh, add(k[8], w8), add(k[9], w9));
// Rounds 20..40
w0 = schedule!(w2, w3, w6, w7, w9);
w1 = schedule!(w3, w4, w7, w8, w0);
rounds4!(ae, bf, cg, dh, add(k[10], w0), add(k[11], w1));
w2 = schedule!(w4, w5, w8, w9, w1);
w3 = schedule!(w5, w6, w9, w0, w2);
rounds4!(ae, bf, cg, dh, add(k[12], w2), add(k[13], w3));
w4 = schedule!(w6, w7, w0, w1, w3);
w5 = schedule!(w7, w8, w1, w2, w4);
rounds4!(ae, bf, cg, dh, add(k[14], w4), add(k[15], w5));
w6 = schedule!(w8, w9, w2, w3, w5);
w7 = schedule!(w9, w0, w3, w4, w6);
rounds4!(ae, bf, cg, dh, add(k[16], w6), add(k[17], w7));
w8 = schedule!(w0, w1, w4, w5, w7);
w9 = schedule!(w1, w2, w5, w6, w8);
rounds4!(ae, bf, cg, dh, add(k[18], w8), add(k[19], w9));
// Rounds 40..60
w0 = schedule!(w2, w3, w6, w7, w9);
w1 = schedule!(w3, w4, w7, w8, w0);
rounds4!(ae, bf, cg, dh, add(k[20], w0), add(k[21], w1));
w2 = schedule!(w4, w5, w8, w9, w1);
w3 = schedule!(w5, w6, w9, w0, w2);
rounds4!(ae, bf, cg, dh, add(k[22], w2), add(k[23], w3));
w4 = schedule!(w6, w7, w0, w1, w3);
w5 = schedule!(w7, w8, w1, w2, w4);
rounds4!(ae, bf, cg, dh, add(k[24], w4), add(k[25], w5));
w6 = schedule!(w8, w9, w2, w3, w5);
w7 = schedule!(w9, w0, w3, w4, w6);
rounds4!(ae, bf, cg, dh, add(k[26], w6), add(k[27], w7));
w8 = schedule!(w0, w1, w4, w5, w7);
w9 = schedule!(w1, w2, w5, w6, w8);
rounds4!(ae, bf, cg, dh, add(k[28], w8), add(k[29], w9));
// Rounds 60..80
w0 = schedule!(w2, w3, w6, w7, w9);
w1 = schedule!(w3, w4, w7, w8, w0);
rounds4!(ae, bf, cg, dh, add(k[30], w0), add(k[31], w1));
w2 = schedule!(w4, w5, w8, w9, w1);
w3 = schedule!(w5, w6, w9, w0, w2);
rounds4!(ae, bf, cg, dh, add(k[32], w2), add(k[33], w3));
w4 = schedule!(w6, w7, w0, w1, w3);
w5 = schedule!(w7, w8, w1, w2, w4);
rounds4!(ae, bf, cg, dh, add(k[34], w4), add(k[35], w5));
w6 = schedule!(w8, w9, w2, w3, w5);
w7 = schedule!(w9, w0, w3, w4, w6);
rounds4!(ae, bf, cg, dh, add(k[36], w6), add(k[37], w7));
w8 = schedule!(w0, w1, w4, w5, w7);
w9 = schedule!(w1, w2, w5, w6, w8);
rounds4!(ae, bf, cg, dh, add(k[38], w8), add(k[39], w9));
let [a, e] = ae;
let [b, f] = bf;
let [c, g] = cg;
let [d, h] = dh;
state[0] = state[0].wrapping_add(a);
state[1] = state[1].wrapping_add(b);
state[2] = state[2].wrapping_add(c);
state[3] = state[3].wrapping_add(d);
state[4] = state[4].wrapping_add(e);
state[5] = state[5].wrapping_add(f);
state[6] = state[6].wrapping_add(g);
state[7] = state[7].wrapping_add(h);
}
pub fn compress(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
let mut block_u64 = [0u64; BLOCK_LEN];
// since LLVM can't properly use aliasing yet it will make
// unnecessary state stores without this copy
let mut state_cpy = *state;
for block in blocks {
for (o, chunk) in block_u64.iter_mut().zip(block.chunks_exact(8)) {
*o = u64::from_be_bytes(chunk.try_into().unwrap());
}
sha512_digest_block_u64(&mut state_cpy, &block_u64);
}
*state = state_cpy;
}

zeroidc/vendor/sha2/src/sha512/x86.rs vendored Normal file

@@ -0,0 +1,357 @@
//! SHA-512 `x86`/`x86_64` backend
#![allow(clippy::many_single_char_names)]
use core::mem::size_of;
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use crate::consts::K64;
cpufeatures::new!(avx2_cpuid, "avx2");
pub fn compress(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
// TODO: Replace with https://github.com/rust-lang/rfcs/pull/2725
// after stabilization
if avx2_cpuid::get() {
unsafe {
sha512_compress_x86_64_avx2(state, blocks);
}
} else {
super::soft::compress(state, blocks);
}
}
#[target_feature(enable = "avx2")]
unsafe fn sha512_compress_x86_64_avx2(state: &mut [u64; 8], blocks: &[[u8; 128]]) {
let mut start_block = 0;
if blocks.len() & 0b1 != 0 {
sha512_compress_x86_64_avx(state, &blocks[0]);
start_block += 1;
}
let mut ms: MsgSchedule = [_mm_setzero_si128(); 8];
let mut t2: RoundStates = [_mm_setzero_si128(); 40];
let mut x = [_mm256_setzero_si256(); 8];
for i in (start_block..blocks.len()).step_by(2) {
load_data_avx2(&mut x, &mut ms, &mut t2, blocks.as_ptr().add(i) as *const _);
// First block
let mut current_state = *state;
rounds_0_63_avx2(&mut current_state, &mut x, &mut ms, &mut t2);
rounds_64_79(&mut current_state, &ms);
accumulate_state(state, &current_state);
// Second block
current_state = *state;
process_second_block(&mut current_state, &t2);
accumulate_state(state, &current_state);
}
}
#[inline(always)]
unsafe fn sha512_compress_x86_64_avx(state: &mut [u64; 8], block: &[u8; 128]) {
let mut ms = [_mm_setzero_si128(); 8];
let mut x = [_mm_setzero_si128(); 8];
// Reduced to single iteration
let mut current_state = *state;
load_data_avx(&mut x, &mut ms, block.as_ptr() as *const _);
rounds_0_63_avx(&mut current_state, &mut x, &mut ms);
rounds_64_79(&mut current_state, &ms);
accumulate_state(state, &current_state);
}
#[inline(always)]
unsafe fn load_data_avx(x: &mut [__m128i; 8], ms: &mut MsgSchedule, data: *const __m128i) {
#[allow(non_snake_case)]
let MASK = _mm_setr_epi32(0x04050607, 0x00010203, 0x0c0d0e0f, 0x08090a0b);
macro_rules! unrolled_iterations {
($($i:literal),*) => {$(
x[$i] = _mm_loadu_si128(data.add($i) as *const _);
x[$i] = _mm_shuffle_epi8(x[$i], MASK);
let y = _mm_add_epi64(
x[$i],
_mm_loadu_si128(&K64[2 * $i] as *const u64 as *const _),
);
ms[$i] = y;
)*};
}
unrolled_iterations!(0, 1, 2, 3, 4, 5, 6, 7);
}
#[inline(always)]
unsafe fn load_data_avx2(
x: &mut [__m256i; 8],
ms: &mut MsgSchedule,
t2: &mut RoundStates,
data: *const __m128i,
) {
#[allow(non_snake_case)]
let MASK = _mm256_set_epi64x(
0x0809_0A0B_0C0D_0E0F_i64,
0x0001_0203_0405_0607_i64,
0x0809_0A0B_0C0D_0E0F_i64,
0x0001_0203_0405_0607_i64,
);
macro_rules! unrolled_iterations {
($($i:literal),*) => {$(
x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add(8 + $i) as *const _), 1);
x[$i] = _mm256_insertf128_si256(x[$i], _mm_loadu_si128(data.add($i) as *const _), 0);
x[$i] = _mm256_shuffle_epi8(x[$i], MASK);
let t = _mm_loadu_si128(K64.as_ptr().add($i * 2) as *const u64 as *const _);
let y = _mm256_add_epi64(x[$i], _mm256_set_m128i(t, t));
ms[$i] = _mm256_extracti128_si256(y, 0);
t2[$i] = _mm256_extracti128_si256(y, 1);
)*};
}
unrolled_iterations!(0, 1, 2, 3, 4, 5, 6, 7);
}
#[inline(always)]
unsafe fn rounds_0_63_avx(current_state: &mut State, x: &mut [__m128i; 8], ms: &mut MsgSchedule) {
let mut k64_idx: usize = SHA512_BLOCK_WORDS_NUM;
for _ in 0..4 {
for j in 0..8 {
let k64 = _mm_loadu_si128(&K64[k64_idx] as *const u64 as *const _);
let y = sha512_update_x_avx(x, k64);
{
let ms = cast_ms(ms);
sha_round(current_state, ms[2 * j]);
sha_round(current_state, ms[2 * j + 1]);
}
ms[j] = y;
k64_idx += 2;
}
}
}
#[inline(always)]
unsafe fn rounds_0_63_avx2(
current_state: &mut State,
x: &mut [__m256i; 8],
ms: &mut MsgSchedule,
t2: &mut RoundStates,
) {
let mut k64x4_idx: usize = SHA512_BLOCK_WORDS_NUM;
for i in 1..5 {
for j in 0..8 {
let t = _mm_loadu_si128(K64.as_ptr().add(k64x4_idx) as *const u64 as *const _);
let y = sha512_update_x_avx2(x, _mm256_set_m128i(t, t));
{
let ms = cast_ms(ms);
sha_round(current_state, ms[2 * j]);
sha_round(current_state, ms[2 * j + 1]);
}
ms[j] = _mm256_extracti128_si256(y, 0);
t2[8 * i + j] = _mm256_extracti128_si256(y, 1);
k64x4_idx += 2;
}
}
}
#[inline(always)]
fn rounds_64_79(current_state: &mut State, ms: &MsgSchedule) {
let ms = cast_ms(ms);
for i in 64..80 {
sha_round(current_state, ms[i & 0xf]);
}
}
#[inline(always)]
fn process_second_block(current_state: &mut State, t2: &RoundStates) {
for t2 in cast_rs(t2).iter() {
sha_round(current_state, *t2);
}
}
#[inline(always)]
fn sha_round(s: &mut State, x: u64) {
macro_rules! big_sigma0 {
($a:expr) => {
$a.rotate_right(28) ^ $a.rotate_right(34) ^ $a.rotate_right(39)
};
}
macro_rules! big_sigma1 {
($a:expr) => {
$a.rotate_right(14) ^ $a.rotate_right(18) ^ $a.rotate_right(41)
};
}
macro_rules! bool3ary_202 {
($a:expr, $b:expr, $c:expr) => {
$c ^ ($a & ($b ^ $c))
};
} // Choose, MD5F, SHA1C
macro_rules! bool3ary_232 {
($a:expr, $b:expr, $c:expr) => {
($a & $b) ^ ($a & $c) ^ ($b & $c)
};
} // Majority, SHA1M
macro_rules! rotate_state {
($s:ident) => {{
let tmp = $s[7];
$s[7] = $s[6];
$s[6] = $s[5];
$s[5] = $s[4];
$s[4] = $s[3];
$s[3] = $s[2];
$s[2] = $s[1];
$s[1] = $s[0];
$s[0] = tmp;
}};
}
let t = x
.wrapping_add(s[7])
.wrapping_add(big_sigma1!(s[4]))
.wrapping_add(bool3ary_202!(s[4], s[5], s[6]));
s[7] = t
.wrapping_add(big_sigma0!(s[0]))
.wrapping_add(bool3ary_232!(s[0], s[1], s[2]));
s[3] = s[3].wrapping_add(t);
rotate_state!(s);
}
#[inline(always)]
fn accumulate_state(dst: &mut State, src: &State) {
for i in 0..SHA512_HASH_WORDS_NUM {
dst[i] = dst[i].wrapping_add(src[i]);
}
}
macro_rules! fn_sha512_update_x {
($name:ident, $ty:ident, {
ADD64 = $ADD64:ident,
ALIGNR8 = $ALIGNR8:ident,
SRL64 = $SRL64:ident,
SLL64 = $SLL64:ident,
XOR = $XOR:ident,
}) => {
unsafe fn $name(x: &mut [$ty; 8], k64: $ty) -> $ty {
// q[2:1]
let mut t0 = $ALIGNR8(x[1], x[0], 8);
// q[10:9]
let mut t3 = $ALIGNR8(x[5], x[4], 8);
// q[2:1] >> s0[0]
let mut t2 = $SRL64(t0, 1);
// q[1:0] + q[10:9]
x[0] = $ADD64(x[0], t3);
// q[2:1] >> s0[2]
t3 = $SRL64(t0, 7);
// q[2:1] << (64 - s0[1])
let mut t1 = $SLL64(t0, 64 - 8);
// (q[2:1] >> s0[2]) ^
// (q[2:1] >> s0[0])
t0 = $XOR(t3, t2);
// q[2:1] >> s0[1]
t2 = $SRL64(t2, 8 - 1);
// (q[2:1] >> s0[2]) ^
// (q[2:1] >> s0[0]) ^
// q[2:1] << (64 - s0[1])
t0 = $XOR(t0, t1);
// q[2:1] << (64 - s0[0])
t1 = $SLL64(t1, 8 - 1);
// sigma1(q[2:1])
t0 = $XOR(t0, t2);
t0 = $XOR(t0, t1);
// q[15:14] >> s1[2]
t3 = $SRL64(x[7], 6);
// q[15:14] >> (64 - s1[1])
t2 = $SLL64(x[7], 64 - 61);
// q[1:0] + sigma0(q[2:1])
x[0] = $ADD64(x[0], t0);
// q[15:14] >> s1[0]
t1 = $SRL64(x[7], 19);
// q[15:14] >> s1[2] ^
// q[15:14] >> (64 - s1[1])
t3 = $XOR(t3, t2);
// q[15:14] >> (64 - s1[0])
t2 = $SLL64(t2, 61 - 19);
// q[15:14] >> s1[2] ^
// q[15:14] >> (64 - s1[1] ^
// q[15:14] >> s1[0]
t3 = $XOR(t3, t1);
// q[15:14] >> s1[1]
t1 = $SRL64(t1, 61 - 19);
// sigma1(q[15:14])
t3 = $XOR(t3, t2);
t3 = $XOR(t3, t1);
// q[1:0] + q[10:9] + sigma1(q[15:14]) + sigma0(q[2:1])
x[0] = $ADD64(x[0], t3);
// rotate
let temp = x[0];
x[0] = x[1];
x[1] = x[2];
x[2] = x[3];
x[3] = x[4];
x[4] = x[5];
x[5] = x[6];
x[6] = x[7];
x[7] = temp;
$ADD64(x[7], k64)
}
};
}
fn_sha512_update_x!(sha512_update_x_avx, __m128i, {
ADD64 = _mm_add_epi64,
ALIGNR8 = _mm_alignr_epi8,
SRL64 = _mm_srli_epi64,
SLL64 = _mm_slli_epi64,
XOR = _mm_xor_si128,
});
fn_sha512_update_x!(sha512_update_x_avx2, __m256i, {
ADD64 = _mm256_add_epi64,
ALIGNR8 = _mm256_alignr_epi8,
SRL64 = _mm256_srli_epi64,
SLL64 = _mm256_slli_epi64,
XOR = _mm256_xor_si256,
});
#[inline(always)]
fn cast_ms(ms: &MsgSchedule) -> &[u64; SHA512_BLOCK_WORDS_NUM] {
unsafe { &*(ms as *const MsgSchedule as *const _) }
}
#[inline(always)]
fn cast_rs(rs: &RoundStates) -> &[u64; SHA512_ROUNDS_NUM] {
unsafe { &*(rs as *const RoundStates as *const _) }
}
type State = [u64; SHA512_HASH_WORDS_NUM];
type MsgSchedule = [__m128i; SHA512_BLOCK_WORDS_NUM / 2];
type RoundStates = [__m128i; SHA512_ROUNDS_NUM / 2];
const SHA512_BLOCK_BYTE_LEN: usize = 128;
const SHA512_ROUNDS_NUM: usize = 80;
const SHA512_HASH_BYTE_LEN: usize = 64;
const SHA512_HASH_WORDS_NUM: usize = SHA512_HASH_BYTE_LEN / size_of::<u64>();
const SHA512_BLOCK_WORDS_NUM: usize = SHA512_BLOCK_BYTE_LEN / size_of::<u64>();
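
For orientation, the types above pack two 64-bit values per `__m128i`, so the 16-word message schedule fits in 8 registers and the 80 per-round values in 40. The arithmetic, restated as a sketch:

use core::mem::size_of;

let block_words = 128 / size_of::<u64>(); // 16 message words per block
assert_eq!(block_words / 2, 8);           // MsgSchedule: [__m128i; 8]
assert_eq!(80 / 2, 40);                   // RoundStates: [__m128i; 40]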