From 79e5e8b052b56f8c6fc07d8407fcfc3aaf39bab3 Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Tue, 13 Feb 2024 10:11:54 -0500 Subject: [PATCH 01/23] Add cryp configuration. --- embassy-stm32/src/cryp/mod.rs | 227 ++++++++++++++++++++++++++++++++++ embassy-stm32/src/lib.rs | 2 + 2 files changed, 229 insertions(+) create mode 100644 embassy-stm32/src/cryp/mod.rs diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs new file mode 100644 index 000000000..dedc6ddc5 --- /dev/null +++ b/embassy-stm32/src/cryp/mod.rs @@ -0,0 +1,227 @@ +use embassy_hal_internal::{into_ref, PeripheralRef}; +use pac::cryp::Init; + +use crate::pac; +use crate::peripherals::CRYP; +use crate::rcc::sealed::RccPeripheral; +use crate::{interrupt, peripherals, Peripheral}; + +pub struct Context<'c> { + key: &'c [u8], +} + +#[derive(PartialEq)] +pub enum Algorithm { + AES, + DES, + TDES, +} + +#[derive(PartialEq)] +pub enum Mode { + ECB, + CBC, + CTR, + GCM, + GMAC, + CCM, +} + +#[derive(PartialEq)] +pub enum Direction { + Encrypt, + Decrypt, +} + +/// Crypto Accelerator Driver +pub struct Cryp<'d, T: Instance, In, Out> { + _peripheral: PeripheralRef<'d, T>, + indma: PeripheralRef<'d, In>, + outdma: PeripheralRef<'d, Out>, +} + +type InitVector<'v> = Option<&'v [u8]>; + +impl<'d, T: Instance, In, Out> Cryp<'d, T, In, Out> { + /// Create a new CRYP driver. + pub fn new( + peri: impl Peripheral
<P = T> + 'd, + indma: impl Peripheral<P = In>
+ 'd, + outdma: impl Peripheral<P = Out>
+ 'd, + ) -> Self { + CRYP::enable_and_reset(); + into_ref!(peri, indma, outdma); + let instance = Self { + _peripheral: peri, + indma: indma, + outdma: outdma, + }; + instance + } + + /// Start a new cipher operation. + /// Key size must be 128, 192, or 256 bits. + pub fn start(key: &[u8], iv: InitVector, algo: Algorithm, mode: Mode, dir: Direction) -> Context { + T::regs().cr().modify(|w| w.set_crypen(false)); + + let keylen = key.len() * 8; + let ivlen; + if let Some(iv) = iv { + ivlen = iv.len() * 8; + } else { + ivlen = 0; + } + + // Checks for correctness + if algo == Algorithm::AES { + match keylen { + 128 => T::regs().cr().write(|w| w.set_keysize(0)), + 192 => T::regs().cr().write(|w| w.set_keysize(1)), + 256 => T::regs().cr().write(|w| w.set_keysize(2)), + _ => panic!("Key length must be 128, 192, or 256 bits."), + } + + if (mode == Mode::GCM) && (ivlen != 96) { + panic!("IV length must be 96 bits for GCM."); + } else if (mode == Mode::CBC) && (ivlen != 128) { + panic!("IV length must be 128 bits for CBC."); + } else if (mode == Mode::CCM) && (ivlen != 128) { + panic!("IV length must be 128 bits for CCM."); + } else if (mode == Mode::CTR) && (ivlen != 64) { + panic!("IV length must be 64 bits for CTR."); + } else if (mode == Mode::GCM) && (ivlen != 96) { + panic!("IV length must be 96 bits for GCM."); + } else if (mode == Mode::GMAC) && (ivlen != 96) { + panic!("IV length must be 96 bits for GMAC."); + } + } + + // Load the key into the registers. + let mut keyidx = 0; + let mut keyword: [u8; 4] = [0; 4]; + if keylen > 192 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 128 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 64 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).krr().write_value(u32::from_be_bytes(keyword)); + } + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(3).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword)); + + // Set data type to 8-bit. This will match software implementations. 
+ T::regs().cr().modify(|w| w.set_datatype(2)); + + if algo == Algorithm::AES { + if (mode == Mode::ECB) || (mode == Mode::CBC) { + T::regs().cr().modify(|w| w.set_algomode0(7)); + T::regs().cr().modify(|w| w.set_crypen(true)); + while T::regs().sr().read().busy() {} + } + + match mode { + Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(4)), + Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(5)), + Mode::CTR => T::regs().cr().modify(|w| w.set_algomode0(6)), + Mode::GCM => T::regs().cr().modify(|w| w.set_algomode0(8)), + Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode0(8)), + Mode::CCM => T::regs().cr().modify(|w| w.set_algomode0(9)), + } + } else if algo == Algorithm::DES { + match mode { + Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(2)), + Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(3)), + _ => panic!("Only ECB and CBC modes are valid for DES."), + } + } else if algo == Algorithm::TDES { + match mode { + Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(0)), + Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(1)), + _ => panic!("Only ECB and CBC modes are valid for TDES."), + } + } + + // Set encrypt/decrypt + if dir == Direction::Encrypt { + T::regs().cr().modify(|w| w.set_algodir(false)); + } else { + T::regs().cr().modify(|w| w.set_algodir(true)); + } + + // Load the IV into the registers. + if let Some(iv) = iv { + let mut iv_idx = 0; + let mut iv_word: [u8; 4] = [0; 4]; + iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + if iv.len() >= 12 { + T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + } + if iv.len() >= 16 { + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); + } + } + + // Flush in/out FIFOs + T::regs().cr().modify(|w| w.fflush()); + + let ctx = Context { key: key }; + + ctx + } +} + +pub(crate) mod sealed { + use super::*; + + pub trait Instance { + fn regs() -> pac::cryp::Cryp; + } +} + +/// RNG instance trait. +pub trait Instance: sealed::Instance + Peripheral
+ crate::rcc::RccPeripheral + 'static + Send { + /// Interrupt for this RNG instance. + type Interrupt: interrupt::typelevel::Interrupt; +} + +foreach_interrupt!( + ($inst:ident, rng, CRYP, GLOBAL, $irq:ident) => { + impl Instance for peripherals::$inst { + type Interrupt = crate::interrupt::typelevel::$irq; + } + + impl sealed::Instance for peripherals::$inst { + fn regs() -> crate::pac::cryp::Cryp { + crate::pac::$inst + } + } + }; +); diff --git a/embassy-stm32/src/lib.rs b/embassy-stm32/src/lib.rs index cd1ede0fa..6859eef6c 100644 --- a/embassy-stm32/src/lib.rs +++ b/embassy-stm32/src/lib.rs @@ -34,6 +34,8 @@ pub mod adc; pub mod can; #[cfg(crc)] pub mod crc; +#[cfg(cryp)] +pub mod cryp; #[cfg(dac)] pub mod dac; #[cfg(dcmi)] From a0a8a4ec864763948d4a965ccf8ec11ca91cb15f Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Wed, 14 Feb 2024 20:24:52 -0500 Subject: [PATCH 02/23] Support CBC, ECB, CTR modes. --- embassy-stm32/src/cryp/mod.rs | 350 +++++++++++++++++++++++++++------- 1 file changed, 282 insertions(+), 68 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index dedc6ddc5..f266313c1 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -1,23 +1,34 @@ use embassy_hal_internal::{into_ref, PeripheralRef}; -use pac::cryp::Init; use crate::pac; use crate::peripherals::CRYP; use crate::rcc::sealed::RccPeripheral; -use crate::{interrupt, peripherals, Peripheral}; +use crate::{interrupt, Peripheral}; + +const DES_BLOCK_SIZE: usize = 8; // 64 bits +const AES_BLOCK_SIZE: usize = 16; // 128 bits pub struct Context<'c> { + algo: Algorithm, + mode: Mode, + dir: Direction, + last_block_processed: bool, + aad_complete: bool, + cr: u32, + iv: [u32; 4], key: &'c [u8], + csgcmccm: [u32; 8], + csgcm: [u32; 8], } -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Copy)] pub enum Algorithm { AES, DES, TDES, } -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Copy)] pub enum Mode { ECB, CBC, @@ -27,53 +38,55 @@ pub enum Mode { CCM, } -#[derive(PartialEq)] +#[derive(PartialEq, Clone, Copy)] pub enum Direction { Encrypt, Decrypt, } /// Crypto Accelerator Driver -pub struct Cryp<'d, T: Instance, In, Out> { +pub struct Cryp<'d, T: Instance> { _peripheral: PeripheralRef<'d, T>, - indma: PeripheralRef<'d, In>, - outdma: PeripheralRef<'d, Out>, } type InitVector<'v> = Option<&'v [u8]>; -impl<'d, T: Instance, In, Out> Cryp<'d, T, In, Out> { +impl<'d, T: Instance> Cryp<'d, T> { /// Create a new CRYP driver. - pub fn new( - peri: impl Peripheral
<P = T> + 'd, - indma: impl Peripheral<P = In>
+ 'd, - outdma: impl Peripheral<P = Out>
+ 'd, - ) -> Self { + pub fn new(peri: impl Peripheral<P = T>
+ 'd) -> Self { CRYP::enable_and_reset(); - into_ref!(peri, indma, outdma); - let instance = Self { - _peripheral: peri, - indma: indma, - outdma: outdma, - }; + into_ref!(peri); + let instance = Self { _peripheral: peri }; instance } /// Start a new cipher operation. /// Key size must be 128, 192, or 256 bits. - pub fn start(key: &[u8], iv: InitVector, algo: Algorithm, mode: Mode, dir: Direction) -> Context { - T::regs().cr().modify(|w| w.set_crypen(false)); + pub fn start<'c>(&self, key: &'c [u8], iv: InitVector, algo: Algorithm, mode: Mode, dir: Direction) -> Context<'c> { + let mut ctx = Context { + algo, + mode, + dir, + last_block_processed: false, + cr: 0, + iv: [0; 4], + key, + csgcmccm: [0; 8], + csgcm: [0; 8], + aad_complete: false, + }; - let keylen = key.len() * 8; - let ivlen; - if let Some(iv) = iv { - ivlen = iv.len() * 8; - } else { - ivlen = 0; - } + T::regs().cr().modify(|w| w.set_crypen(false)); // Checks for correctness if algo == Algorithm::AES { + let keylen = key.len() * 8; + let ivlen; + if let Some(iv) = iv { + ivlen = iv.len() * 8; + } else { + ivlen = 0; + } match keylen { 128 => T::regs().cr().write(|w| w.set_keysize(0)), 192 => T::regs().cr().write(|w| w.set_keysize(1)), @@ -96,49 +109,14 @@ impl<'d, T: Instance, In, Out> Cryp<'d, T, In, Out> { } } - // Load the key into the registers. - let mut keyidx = 0; - let mut keyword: [u8; 4] = [0; 4]; - if keylen > 192 { - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(0).klr().write_value(u32::from_be_bytes(keyword)); - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(0).krr().write_value(u32::from_be_bytes(keyword)); - } - if keylen > 128 { - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(1).klr().write_value(u32::from_be_bytes(keyword)); - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(1).krr().write_value(u32::from_be_bytes(keyword)); - } - if keylen > 64 { - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(2).klr().write_value(u32::from_be_bytes(keyword)); - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(2).krr().write_value(u32::from_be_bytes(keyword)); - } - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - keyidx += 4; - T::regs().key(3).klr().write_value(u32::from_be_bytes(keyword)); - keyword.copy_from_slice(&key[keyidx..keyidx + 4]); - T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword)); + self.load_key(key); // Set data type to 8-bit. This will match software implementations. 
T::regs().cr().modify(|w| w.set_datatype(2)); - if algo == Algorithm::AES { - if (mode == Mode::ECB) || (mode == Mode::CBC) { - T::regs().cr().modify(|w| w.set_algomode0(7)); - T::regs().cr().modify(|w| w.set_crypen(true)); - while T::regs().sr().read().busy() {} - } + self.prepare_key(&ctx); + if algo == Algorithm::AES { match mode { Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(4)), Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(5)), @@ -192,10 +170,246 @@ impl<'d, T: Instance, In, Out> Cryp<'d, T, In, Out> { // Flush in/out FIFOs T::regs().cr().modify(|w| w.fflush()); - let ctx = Context { key: key }; + if mode == Mode::GCM { + // GCM init phase + T::regs().cr().modify(|w| w.set_gcm_ccmph(0)); + T::regs().cr().modify(|w| w.set_crypen(true)); + while T::regs().cr().read().crypen() {} + } + + self.store_context(&mut ctx); ctx } + + // pub fn aad_blocking(&self, ctx: &mut Context, aad: &[u8]) { + // if ctx.aad_complete { + // panic!("Cannot update AAD after calling 'update'!") + // } + // if (ctx.mode != Mode::GCM) && (ctx.mode != Mode::GMAC) && (ctx.mode != Mode::CCM) { + // panic!("Associated data only valid for GCM, GMAC, and CCM modes.") + // } + + // let mut header_size = 0; + // let mut header: [u8;] + + // if aad.len() < 65280 { + + // } + + // // GCM header phase + // T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); + // T::regs().cr().modify(|w| w.set_crypen(true)); + // } + + pub fn update_blocking(&self, ctx: &mut Context, input: &[u8], output: &mut [u8], last_block: bool) { + self.load_context(ctx); + + ctx.aad_complete = true; + if last_block { + ctx.last_block_processed = true; + } + + let block_size; + if ctx.algo == Algorithm::DES { + block_size = 8; + } else { + block_size = 16; + } + let last_block_remainder = input.len() % block_size; + + // Perform checks for correctness. + + if ctx.mode == Mode::GMAC { + panic!("GMAC works on header data only. Do not call this function for GMAC."); + } + if ctx.last_block_processed { + panic!("The last block has already been processed!"); + } + if input.len() != output.len() { + panic!("Output buffer length must match input length."); + } + if !last_block { + if last_block_remainder != 0 { + panic!("Input length must be a multiple of {block_size} bytes."); + } + } + if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) { + if last_block_remainder != 0 { + panic!("Input must be a multiple of {block_size} bytes in ECB and CBC modes. Consider padding or ciphertext stealing."); + } + } + + // Load data into core, block by block. + let num_full_blocks = input.len() / block_size; + for block in 0..num_full_blocks { + let mut index = block * block_size; + let end_index = index + block_size; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&input[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + let mut index = block * block_size; + let end_index = index + block_size; + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Read block out + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + output[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + } + + // Handle the final block, which is incomplete. + if last_block_remainder > 0 { + if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt { + //Handle special GCM partial block process. 
+ T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().write(|w| w.set_algomode0(6)); + let iv1r = T::regs().csgcmccmr(7).read() - 1; + T::regs().init(1).ivrr().write_value(iv1r); + T::regs().cr().modify(|w| w.set_crypen(true)); + } + + let mut intermediate_data: [u8; 16] = [0; 16]; + let mut last_block: [u8; 16] = [0; 16]; + last_block.copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); + let mut index = 0; + let end_index = block_size; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&last_block[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + let mut index = 0; + let end_index = block_size; + // Block until there is output to read. + while !T::regs().sr().read().ofne() {} + // Read block out + while index < end_index { + let out_word: u32 = T::regs().dout().read(); + intermediate_data[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice()); + index += 4; + } + + // Handle the last block depending on mode. + output[output.len() - last_block_remainder..output.len()] + .copy_from_slice(&intermediate_data[0..last_block_remainder]); + + if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt { + //Handle special GCM partial block process. + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().write(|w| w.set_algomode0(8)); + T::regs().init(1).ivrr().write_value(2); + T::regs().cr().modify(|w| w.set_crypen(true)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); + let mut index = 0; + let end_index = block_size; + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&intermediate_data[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + for _ in 0..4 { + T::regs().dout().read(); + } + } + } + } + + fn prepare_key(&self, ctx: &Context) { + if ctx.algo == Algorithm::AES { + if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) { + T::regs().cr().modify(|w| w.set_algomode0(7)); + T::regs().cr().modify(|w| w.set_crypen(true)); + while T::regs().sr().read().busy() {} + } + } + } + + fn load_key(&self, key: &[u8]) { + // Load the key into the registers. 
+ let mut keyidx = 0; + let mut keyword: [u8; 4] = [0; 4]; + let keylen = key.len() * 8; + if keylen > 192 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(0).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 128 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(1).krr().write_value(u32::from_be_bytes(keyword)); + } + if keylen > 64 { + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(2).krr().write_value(u32::from_be_bytes(keyword)); + } + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + keyidx += 4; + T::regs().key(3).klr().write_value(u32::from_be_bytes(keyword)); + keyword.copy_from_slice(&key[keyidx..keyidx + 4]); + T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword)); + } + + fn store_context(&self, ctx: &mut Context) { + // Wait for data block processing to finish. + while !T::regs().sr().read().ifem() {} + while T::regs().sr().read().ofne() {} + while T::regs().sr().read().busy() {} + + // Disable crypto processor. + T::regs().cr().modify(|w| w.set_crypen(false)); + + // Save the peripheral state. + ctx.cr = T::regs().cr().read().0; + ctx.iv[0] = T::regs().init(0).ivlr().read(); + ctx.iv[1] = T::regs().init(0).ivrr().read(); + ctx.iv[2] = T::regs().init(1).ivlr().read(); + ctx.iv[3] = T::regs().init(1).ivrr().read(); + for i in 0..8 { + ctx.csgcmccm[i] = T::regs().csgcmccmr(i).read(); + ctx.csgcm[i] = T::regs().csgcmr(i).read(); + } + } + + fn load_context(&self, ctx: &Context) { + // Reload state registers. + T::regs().cr().write(|w| w.0 = ctx.cr); + T::regs().init(0).ivlr().write_value(ctx.iv[0]); + T::regs().init(0).ivrr().write_value(ctx.iv[1]); + T::regs().init(1).ivlr().write_value(ctx.iv[2]); + T::regs().init(1).ivrr().write_value(ctx.iv[3]); + for i in 0..8 { + T::regs().csgcmccmr(i).write_value(ctx.csgcmccm[i]); + T::regs().csgcmr(i).write_value(ctx.csgcm[i]); + } + self.load_key(ctx.key); + + // Prepare key if applicable. + self.prepare_key(ctx); + + // Enable crypto processor. + T::regs().cr().modify(|w| w.set_crypen(true)); + } } pub(crate) mod sealed { From 72e4cacd914195352c9760856e8b8e40a7851752 Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Wed, 14 Feb 2024 22:11:38 -0500 Subject: [PATCH 03/23] CBC and ECB AES modes functional. 
--- embassy-stm32/src/cryp/mod.rs | 39 +++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index f266313c1..b368930da 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -3,7 +3,7 @@ use embassy_hal_internal::{into_ref, PeripheralRef}; use crate::pac; use crate::peripherals::CRYP; use crate::rcc::sealed::RccPeripheral; -use crate::{interrupt, Peripheral}; +use crate::{interrupt, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits @@ -49,7 +49,7 @@ pub struct Cryp<'d, T: Instance> { _peripheral: PeripheralRef<'d, T>, } -type InitVector<'v> = Option<&'v [u8]>; +pub type InitVector<'v> = Option<&'v [u8]>; impl<'d, T: Instance> Cryp<'d, T> { /// Create a new CRYP driver. @@ -88,9 +88,9 @@ impl<'d, T: Instance> Cryp<'d, T> { ivlen = 0; } match keylen { - 128 => T::regs().cr().write(|w| w.set_keysize(0)), - 192 => T::regs().cr().write(|w| w.set_keysize(1)), - 256 => T::regs().cr().write(|w| w.set_keysize(2)), + 128 => T::regs().cr().modify(|w| w.set_keysize(0)), + 192 => T::regs().cr().modify(|w| w.set_keysize(1)), + 256 => T::regs().cr().modify(|w| w.set_keysize(2)), _ => panic!("Key length must be 128, 192, or 256 bits."), } @@ -155,13 +155,13 @@ impl<'d, T: Instance> Cryp<'d, T> { T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); iv_idx += 4; + T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); if iv.len() >= 12 { - T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); iv_idx += 4; + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); } if iv.len() >= 16 { - T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); } @@ -206,9 +206,6 @@ impl<'d, T: Instance> Cryp<'d, T> { self.load_context(ctx); ctx.aad_complete = true; - if last_block { - ctx.last_block_processed = true; - } let block_size; if ctx.algo == Algorithm::DES { @@ -231,15 +228,19 @@ impl<'d, T: Instance> Cryp<'d, T> { } if !last_block { if last_block_remainder != 0 { - panic!("Input length must be a multiple of {block_size} bytes."); + panic!("Input length must be a multiple of {} bytes.", block_size); } } if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) { if last_block_remainder != 0 { - panic!("Input must be a multiple of {block_size} bytes in ECB and CBC modes. Consider padding or ciphertext stealing."); + panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", block_size); } } + if last_block { + ctx.last_block_processed = true; + } + // Load data into core, block by block. let num_full_blocks = input.len() / block_size; for block in 0..num_full_blocks { @@ -277,7 +278,7 @@ impl<'d, T: Instance> Cryp<'d, T> { let mut intermediate_data: [u8; 16] = [0; 16]; let mut last_block: [u8; 16] = [0; 16]; - last_block.copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); + last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); let mut index = 0; let end_index = block_size; // Write block in @@ -299,7 +300,8 @@ impl<'d, T: Instance> Cryp<'d, T> { } // Handle the last block depending on mode. 
- output[output.len() - last_block_remainder..output.len()] + let output_len = output.len(); + output[output_len - last_block_remainder..output_len] .copy_from_slice(&intermediate_data[0..last_block_remainder]); if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt { @@ -325,7 +327,7 @@ impl<'d, T: Instance> Cryp<'d, T> { } fn prepare_key(&self, ctx: &Context) { - if ctx.algo == Algorithm::AES { + if ctx.algo == Algorithm::AES && ctx.dir == Direction::Decrypt { if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) { T::regs().cr().modify(|w| w.set_algomode0(7)); T::regs().cr().modify(|w| w.set_crypen(true)); @@ -406,6 +408,7 @@ impl<'d, T: Instance> Cryp<'d, T> { // Prepare key if applicable. self.prepare_key(ctx); + T::regs().cr().write(|w| w.0 = ctx.cr); // Enable crypto processor. T::regs().cr().modify(|w| w.set_crypen(true)); @@ -420,14 +423,14 @@ pub(crate) mod sealed { } } -/// RNG instance trait. +/// CRYP instance trait. pub trait Instance: sealed::Instance + Peripheral
+ crate::rcc::RccPeripheral + 'static + Send { - /// Interrupt for this RNG instance. + /// Interrupt for this CRYP instance. type Interrupt: interrupt::typelevel::Interrupt; } foreach_interrupt!( - ($inst:ident, rng, CRYP, GLOBAL, $irq:ident) => { + ($inst:ident, cryp, CRYP, GLOBAL, $irq:ident) => { impl Instance for peripherals::$inst { type Interrupt = crate::interrupt::typelevel::$irq; } From 565acdf24301a72fe084aa18b7c55a6110609374 Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Wed, 14 Feb 2024 22:38:05 -0500 Subject: [PATCH 04/23] CTR mode functional. --- embassy-stm32/src/cryp/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index b368930da..4db95d55c 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -100,8 +100,8 @@ impl<'d, T: Instance> Cryp<'d, T> { panic!("IV length must be 128 bits for CBC."); } else if (mode == Mode::CCM) && (ivlen != 128) { panic!("IV length must be 128 bits for CCM."); - } else if (mode == Mode::CTR) && (ivlen != 64) { - panic!("IV length must be 64 bits for CTR."); + } else if (mode == Mode::CTR) && (ivlen != 128) { + panic!("IV length must be 128 bits for CTR."); } else if (mode == Mode::GCM) && (ivlen != 96) { panic!("IV length must be 96 bits for GCM."); } else if (mode == Mode::GMAC) && (ivlen != 96) { From c2b03eff62245bd325a781e1e260c150e0a5040c Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Fri, 16 Feb 2024 13:15:14 -0500 Subject: [PATCH 05/23] GCM mode functional. --- embassy-stm32/src/cryp/mod.rs | 244 +++++++++++++++++++++++++++------- 1 file changed, 198 insertions(+), 46 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 4db95d55c..447bcf2f8 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -1,3 +1,4 @@ +//! Crypto Accelerator (CRYP) use embassy_hal_internal::{into_ref, PeripheralRef}; use crate::pac; @@ -8,6 +9,8 @@ use crate::{interrupt, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits +/// Holds the state information for a cipher operation. +/// Allows suspending/resuming of cipher operations. pub struct Context<'c> { algo: Algorithm, mode: Mode, @@ -19,28 +22,44 @@ pub struct Context<'c> { key: &'c [u8], csgcmccm: [u32; 8], csgcm: [u32; 8], + header_len: u64, + payload_len: u64, } +/// Selects the encryption algorithm. #[derive(PartialEq, Clone, Copy)] pub enum Algorithm { + /// Advanced Encryption Standard AES, + /// Data Encryption Standard DES, + /// Triple-DES TDES, } +/// Selects the cipher mode. #[derive(PartialEq, Clone, Copy)] pub enum Mode { + /// Electronic Codebook ECB, + /// Cipher Block Chaining CBC, + /// Counter Mode CTR, + /// Galois Counter Mode GCM, + /// Galois Message Authentication Code GMAC, + /// Counter with CBC-MAC CCM, } +/// Selects whether the crypto processor operates in encryption or decryption mode. #[derive(PartialEq, Clone, Copy)] pub enum Direction { + /// Encryption mode Encrypt, + /// Decryption mode Decrypt, } @@ -49,6 +68,8 @@ pub struct Cryp<'d, T: Instance> { _peripheral: PeripheralRef<'d, T>, } +/// Initialization vector of arbitrary length. +/// When an initialization vector is not needed, `None` may be supplied. 
pub type InitVector<'v> = Option<&'v [u8]>; impl<'d, T: Instance> Cryp<'d, T> { @@ -62,6 +83,8 @@ impl<'d, T: Instance> Cryp<'d, T> { /// Start a new cipher operation. /// Key size must be 128, 192, or 256 bits. + /// Initialization vector must only be supplied if necessary. + /// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode. pub fn start<'c>(&self, key: &'c [u8], iv: InitVector, algo: Algorithm, mode: Mode, dir: Direction) -> Context<'c> { let mut ctx = Context { algo, @@ -74,6 +97,8 @@ impl<'d, T: Instance> Cryp<'d, T> { csgcmccm: [0; 8], csgcm: [0; 8], aad_complete: false, + header_len: 0, + payload_len: 0, }; T::regs().cr().modify(|w| w.set_crypen(false)); @@ -102,8 +127,6 @@ impl<'d, T: Instance> Cryp<'d, T> { panic!("IV length must be 128 bits for CCM."); } else if (mode == Mode::CTR) && (ivlen != 128) { panic!("IV length must be 128 bits for CTR."); - } else if (mode == Mode::GCM) && (ivlen != 96) { - panic!("IV length must be 96 bits for GCM."); } else if (mode == Mode::GMAC) && (ivlen != 96) { panic!("IV length must be 96 bits for GMAC."); } @@ -121,17 +144,27 @@ impl<'d, T: Instance> Cryp<'d, T> { Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(4)), Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(5)), Mode::CTR => T::regs().cr().modify(|w| w.set_algomode0(6)), - Mode::GCM => T::regs().cr().modify(|w| w.set_algomode0(8)), - Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode0(8)), - Mode::CCM => T::regs().cr().modify(|w| w.set_algomode0(9)), + Mode::GCM => T::regs().cr().modify(|w| w.set_algomode0(0)), + Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode0(0)), + Mode::CCM => T::regs().cr().modify(|w| w.set_algomode0(1)), + } + match mode { + Mode::ECB => T::regs().cr().modify(|w| w.set_algomode3(false)), + Mode::CBC => T::regs().cr().modify(|w| w.set_algomode3(false)), + Mode::CTR => T::regs().cr().modify(|w| w.set_algomode3(false)), + Mode::GCM => T::regs().cr().modify(|w| w.set_algomode3(true)), + Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode3(true)), + Mode::CCM => T::regs().cr().modify(|w| w.set_algomode3(true)), } } else if algo == Algorithm::DES { + T::regs().cr().modify(|w| w.set_algomode3(false)); match mode { Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(2)), Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(3)), _ => panic!("Only ECB and CBC modes are valid for DES."), } } else if algo == Algorithm::TDES { + T::regs().cr().modify(|w| w.set_algomode3(false)); match mode { Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(0)), Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(1)), @@ -148,23 +181,26 @@ impl<'d, T: Instance> Cryp<'d, T> { // Load the IV into the registers. 
if let Some(iv) = iv { + let mut full_iv: [u8; 16] = [0; 16]; + full_iv[0..iv.len()].copy_from_slice(iv); + + if (mode == Mode::GCM) || (mode == Mode::GMAC) { + full_iv[15] = 2; + } + let mut iv_idx = 0; let mut iv_word: [u8; 4] = [0; 4]; - iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); iv_idx += 4; T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word)); - iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); iv_idx += 4; T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word)); - if iv.len() >= 12 { - iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); - iv_idx += 4; - T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); - } - if iv.len() >= 16 { - iv_word.copy_from_slice(&iv[iv_idx..iv_idx + 4]); - T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); - } + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + iv_idx += 4; + T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word)); + iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]); + T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word)); } // Flush in/out FIFOs @@ -182,41 +218,116 @@ impl<'d, T: Instance> Cryp<'d, T> { ctx } - // pub fn aad_blocking(&self, ctx: &mut Context, aad: &[u8]) { - // if ctx.aad_complete { - // panic!("Cannot update AAD after calling 'update'!") - // } - // if (ctx.mode != Mode::GCM) && (ctx.mode != Mode::GMAC) && (ctx.mode != Mode::CCM) { - // panic!("Associated data only valid for GCM, GMAC, and CCM modes.") - // } - - // let mut header_size = 0; - // let mut header: [u8;] - - // if aad.len() < 65280 { - - // } - - // // GCM header phase - // T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); - // T::regs().cr().modify(|w| w.set_crypen(true)); - // } - - pub fn update_blocking(&self, ctx: &mut Context, input: &[u8], output: &mut [u8], last_block: bool) { + /// Controls the header phase of cipher processing. + /// This function is only valid for GCM, CCM, and GMAC modes. + /// It only needs to be called if using one of these modes and there is associated data. + /// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`. + /// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block. + /// When supplying the last block of AAD, `last_aad_block` must be `true`. + pub fn aad_blocking(&self, ctx: &mut Context, aad: &[u8], last_aad_block: bool) { self.load_context(ctx); - ctx.aad_complete = true; - let block_size; if ctx.algo == Algorithm::DES { - block_size = 8; + block_size = DES_BLOCK_SIZE; } else { - block_size = 16; + block_size = AES_BLOCK_SIZE; + } + let last_block_remainder = aad.len() % block_size; + + // Perform checks for correctness. + if ctx.aad_complete { + panic!("Cannot update AAD after calling 'update'!") + } + if (ctx.mode != Mode::GCM) && (ctx.mode != Mode::GMAC) && (ctx.mode != Mode::CCM) { + panic!("Associated data only valid for GCM, GMAC, and CCM modes.") + } + if !last_aad_block { + if last_block_remainder != 0 { + panic!("Input length must be a multiple of {} bytes.", block_size); + } + } + + ctx.header_len += aad.len() as u64; + + // GCM header phase + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(1)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + // Load data into core, block by block. 
+ let num_full_blocks = aad.len() / block_size; + for block in 0..num_full_blocks { + let mut index = block * block_size; + let end_index = index + block_size; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&aad[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty. + while !T::regs().sr().read().ifem() {} + } + + // Handle the final block, which is incomplete. + if last_block_remainder > 0 { + let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + last_block[..last_block_remainder].copy_from_slice(&aad[aad.len() - last_block_remainder..aad.len()]); + let mut index = 0; + let end_index = block_size; + // Write block in + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&last_block[index..index + 4]); + T::regs().din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + // Block until input FIFO is empty + while !T::regs().sr().read().ifem() {} + } + + if last_aad_block { + // Switch to payload phase. + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + } + + self.store_context(ctx); + } + + /// Performs encryption/decryption on the provided context. + /// The context determines algorithm, mode, and state of the crypto accelerator. + /// When the last piece of data is supplied, `last_block` should be `true`. + /// This function panics under various mismatches of parameters. + /// Input and output buffer lengths must match. + /// Data must be a multiple of block size (128-bits for AES, 64-bits for DES) for CBC and ECB modes. + /// Padding or ciphertext stealing must be managed by the application for these modes. + /// Data must also be a multiple of block size unless `last_block` is `true`. + pub fn payload_blocking(&self, ctx: &mut Context, input: &[u8], output: &mut [u8], last_block: bool) { + self.load_context(ctx); + + let block_size; + if ctx.algo == Algorithm::DES { + block_size = DES_BLOCK_SIZE; + } else { + block_size = AES_BLOCK_SIZE; } let last_block_remainder = input.len() % block_size; // Perform checks for correctness. - + if !ctx.aad_complete && ctx.header_len > 0 { + panic!("Additional associated data must be processed first!"); + } else if !ctx.aad_complete { + ctx.aad_complete = true; + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(2)); + T::regs().cr().modify(|w| w.fflush()); + T::regs().cr().modify(|w| w.set_crypen(true)); + } if ctx.mode == Mode::GMAC { panic!("GMAC works on header data only. Do not call this function for GMAC."); } @@ -270,14 +381,15 @@ impl<'d, T: Instance> Cryp<'d, T> { if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt { //Handle special GCM partial block process. 
T::regs().cr().modify(|w| w.set_crypen(false)); - T::regs().cr().write(|w| w.set_algomode0(6)); + T::regs().cr().modify(|w| w.set_algomode3(false)); + T::regs().cr().modify(|w| w.set_algomode0(6)); let iv1r = T::regs().csgcmccmr(7).read() - 1; T::regs().init(1).ivrr().write_value(iv1r); T::regs().cr().modify(|w| w.set_crypen(true)); } - let mut intermediate_data: [u8; 16] = [0; 16]; - let mut last_block: [u8; 16] = [0; 16]; + let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; + let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE]; last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]); let mut index = 0; let end_index = block_size; @@ -307,7 +419,8 @@ impl<'d, T: Instance> Cryp<'d, T> { if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt { //Handle special GCM partial block process. T::regs().cr().modify(|w| w.set_crypen(false)); - T::regs().cr().write(|w| w.set_algomode0(8)); + T::regs().cr().write(|w| w.set_algomode3(true)); + T::regs().cr().write(|w| w.set_algomode0(0)); T::regs().init(1).ivrr().write_value(2); T::regs().cr().modify(|w| w.set_crypen(true)); T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); @@ -324,12 +437,51 @@ impl<'d, T: Instance> Cryp<'d, T> { } } } + + ctx.payload_len += input.len() as u64; + } + + /// This function only needs to be called for GCM, CCM, and GMAC modes to + /// generate an authentication tag. Calling this function on any other mode + /// does nothing except consumes the context. A buffer for the authentication + /// tag must be supplied. + pub fn finish_blocking(&self, mut ctx: Context, tag: &mut [u8; 16]) { + // Just consume the context if called for any other mode. + if (ctx.mode != Mode::GCM) || (ctx.mode != Mode::CCM) || (ctx.mode != Mode::GMAC) { + return; + } + + self.load_context(&mut ctx); + + T::regs().cr().modify(|w| w.set_crypen(false)); + T::regs().cr().modify(|w| w.set_gcm_ccmph(3)); + T::regs().cr().modify(|w| w.set_crypen(true)); + + let headerlen1: u32 = (ctx.header_len >> 32) as u32; + let headerlen2: u32 = ctx.header_len as u32; + let payloadlen1: u32 = (ctx.payload_len >> 32) as u32; + let payloadlen2: u32 = ctx.payload_len as u32; + + T::regs().din().write_value(headerlen1.swap_bytes()); + T::regs().din().write_value(headerlen2.swap_bytes()); + T::regs().din().write_value(payloadlen1.swap_bytes()); + T::regs().din().write_value(payloadlen2.swap_bytes()); + + while !T::regs().sr().read().ofne() {} + + tag[0..4].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + tag[4..8].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + tag[8..12].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + tag[12..16].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice()); + + T::regs().cr().modify(|w| w.set_crypen(false)); } fn prepare_key(&self, ctx: &Context) { if ctx.algo == Algorithm::AES && ctx.dir == Direction::Decrypt { if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) { T::regs().cr().modify(|w| w.set_algomode0(7)); + T::regs().cr().modify(|w| w.set_algomode3(false)); T::regs().cr().modify(|w| w.set_crypen(true)); while T::regs().sr().read().busy() {} } From fec26e896052cc0eac6bfa6415a4ebad5352d1d9 Mon Sep 17 00:00:00 2001 From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com> Date: Sun, 18 Feb 2024 21:40:18 -0500 Subject: [PATCH 06/23] Refactored ciphers into traits. 
--- embassy-stm32/src/cryp/mod.rs | 651 ++++++++++++++++++++++------------ 1 file changed, 431 insertions(+), 220 deletions(-) diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs index 447bcf2f8..29c1db12e 100644 --- a/embassy-stm32/src/cryp/mod.rs +++ b/embassy-stm32/src/cryp/mod.rs @@ -1,4 +1,6 @@ //! Crypto Accelerator (CRYP) +use core::marker::PhantomData; + use embassy_hal_internal::{into_ref, PeripheralRef}; use crate::pac; @@ -9,51 +11,375 @@ use crate::{interrupt, peripherals, Peripheral}; const DES_BLOCK_SIZE: usize = 8; // 64 bits const AES_BLOCK_SIZE: usize = 16; // 128 bits +/// This trait encapsulates all cipher-specific behavior/ +pub trait Cipher<'c> { + /// Processing block size. Determined by the processor and the algorithm. + const BLOCK_SIZE: usize; + + /// Indicates whether the cipher requires the application to provide padding. + /// If `true`, no partial blocks will be accepted (a panic will occur). + const REQUIRES_PADDING: bool = false; + + /// Returns the symmetric key. + fn key(&self) -> &'c [u8]; + + /// Returns the initialization vector. + fn iv(&self) -> &[u8]; + + /// Sets the processor algorithm mode according to the associated cipher. + fn set_algomode(&self, p: &pac::cryp::Cryp); + + /// Performs any key preparation within the processor, if necessary. + fn prepare_key(&self, _p: &pac::cryp::Cryp) {} + + /// Performs any cipher-specific initialization. + fn init_phase(&self, _p: &pac::cryp::Cryp) {} + + /// Called prior to processing the last data block for cipher-specific operations. + fn pre_final_block(&self, _p: &pac::cryp::Cryp) {} + + /// Called after processing the last data block for cipher-specific operations. + fn post_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _int_data: &[u8; AES_BLOCK_SIZE]) {} +} + +/// This trait enables restriction of ciphers to specific key sizes. +pub trait CipherSized {} + +/// This trait enables restriction of a header phase to authenticated ciphers only. +pub trait CipherAuthenticated {} + +/// AES-ECB Cipher Mode +pub struct AesEcb<'c, const KEY_SIZE: usize> { + iv: &'c [u8; 0], + key: &'c [u8; KEY_SIZE], +} + +impl<'c, const KEY_SIZE: usize> AesEcb<'c, KEY_SIZE> { + /// Constructs a new AES-ECB cipher for a cryptographic operation. + pub fn new(key: &'c [u8; KEY_SIZE]) -> Self { + return Self { key: key, iv: &[0; 0] }; + } +} + +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesEcb<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + const REQUIRES_PADDING: bool = true; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &'c [u8] { + self.iv + } + + fn prepare_key(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(7)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_crypen(true)); + while p.sr().read().busy() {} + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(4)); + p.cr().modify(|w| w.set_algomode3(false)); + } +} + +impl<'c> CipherSized for AesEcb<'c, { 128 / 8 }> {} +impl<'c> CipherSized for AesEcb<'c, { 192 / 8 }> {} +impl<'c> CipherSized for AesEcb<'c, { 256 / 8 }> {} + +/// AES-CBC Cipher Mode +pub struct AesCbc<'c, const KEY_SIZE: usize> { + iv: &'c [u8; 16], + key: &'c [u8; KEY_SIZE], +} + +impl<'c, const KEY_SIZE: usize> AesCbc<'c, KEY_SIZE> { + /// Constructs a new AES-CBC cipher for a cryptographic operation. 
+ pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self { + return Self { key: key, iv: iv }; + } +} + +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCbc<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + const REQUIRES_PADDING: bool = true; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &'c [u8] { + self.iv + } + + fn prepare_key(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(7)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_crypen(true)); + while p.sr().read().busy() {} + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(5)); + p.cr().modify(|w| w.set_algomode3(false)); + } +} + +impl<'c> CipherSized for AesCbc<'c, { 128 / 8 }> {} +impl<'c> CipherSized for AesCbc<'c, { 192 / 8 }> {} +impl<'c> CipherSized for AesCbc<'c, { 256 / 8 }> {} + +/// AES-CTR Cipher Mode +pub struct AesCtr<'c, const KEY_SIZE: usize> { + iv: &'c [u8; 16], + key: &'c [u8; KEY_SIZE], +} + +impl<'c, const KEY_SIZE: usize> AesCtr<'c, KEY_SIZE> { + /// Constructs a new AES-CTR cipher for a cryptographic operation. + pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self { + return Self { key: key, iv: iv }; + } +} + +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCtr<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &'c [u8] { + self.iv + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(6)); + p.cr().modify(|w| w.set_algomode3(false)); + } +} + +impl<'c> CipherSized for AesCtr<'c, { 128 / 8 }> {} +impl<'c> CipherSized for AesCtr<'c, { 192 / 8 }> {} +impl<'c> CipherSized for AesCtr<'c, { 256 / 8 }> {} + +///AES-GCM Cipher Mode +pub struct AesGcm<'c, const KEY_SIZE: usize> { + iv: [u8; 16], + key: &'c [u8; KEY_SIZE], +} + +impl<'c, const KEY_SIZE: usize> AesGcm<'c, KEY_SIZE> { + /// Constucts a new AES-GCM cipher for a cryptographic operation. + pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self { + let mut new_gcm = Self { key: key, iv: [0; 16] }; + new_gcm.iv[..12].copy_from_slice(iv); + new_gcm.iv[15] = 2; + new_gcm + } +} + +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &[u8] { + self.iv.as_slice() + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(0)); + p.cr().modify(|w| w.set_algomode3(true)); + } + + fn init_phase(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + fn pre_final_block(&self, p: &pac::cryp::Cryp) { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_algomode0(6)); + let iv1r = p.csgcmccmr(7).read() - 1; + p.init(1).ivrr().write_value(iv1r); + p.cr().modify(|w| w.set_crypen(true)); + } + + fn post_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, int_data: &[u8; AES_BLOCK_SIZE]) { + if dir == Direction::Encrypt { + //Handle special GCM partial block process. 
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().write(|w| w.set_algomode3(true)); + p.cr().write(|w| w.set_algomode0(0)); + p.init(1).ivrr().write_value(2); + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + let mut index = 0; + let end_index = Self::BLOCK_SIZE; + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&int_data[index..index + 4]); + p.din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + for _ in 0..4 { + p.dout().read(); + } + } + } +} + +impl<'c> CipherSized for AesGcm<'c, { 128 / 8 }> {} +impl<'c> CipherSized for AesGcm<'c, { 192 / 8 }> {} +impl<'c> CipherSized for AesGcm<'c, { 256 / 8 }> {} +impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesGcm<'c, KEY_SIZE> {} + +/// AES-GMAC Cipher Mode +pub struct AesGmac<'c, const KEY_SIZE: usize> { + iv: [u8; 16], + key: &'c [u8; KEY_SIZE], +} + +impl<'c, const KEY_SIZE: usize> AesGmac<'c, KEY_SIZE> { + /// Constructs a new AES-GMAC cipher for a cryptographic operation. + pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self { + let mut new_gmac = Self { key: key, iv: [0; 16] }; + new_gmac.iv[..12].copy_from_slice(iv); + new_gmac.iv[15] = 2; + new_gmac + } +} + +impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> { + const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + + fn key(&self) -> &'c [u8] { + self.key + } + + fn iv(&self) -> &[u8] { + self.iv.as_slice() + } + + fn set_algomode(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_algomode0(0)); + p.cr().modify(|w| w.set_algomode3(true)); + } + + fn init_phase(&self, p: &pac::cryp::Cryp) { + p.cr().modify(|w| w.set_gcm_ccmph(0)); + p.cr().modify(|w| w.set_crypen(true)); + while p.cr().read().crypen() {} + } + + fn pre_final_block(&self, p: &pac::cryp::Cryp) { + //Handle special GCM partial block process. + p.cr().modify(|w| w.set_crypen(false)); + p.cr().modify(|w| w.set_algomode3(false)); + p.cr().modify(|w| w.set_algomode0(6)); + let iv1r = p.csgcmccmr(7).read() - 1; + p.init(1).ivrr().write_value(iv1r); + p.cr().modify(|w| w.set_crypen(true)); + } + + fn post_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, int_data: &[u8; AES_BLOCK_SIZE]) { + if dir == Direction::Encrypt { + //Handle special GCM partial block process. 
+ p.cr().modify(|w| w.set_crypen(false)); + p.cr().write(|w| w.set_algomode3(true)); + p.cr().write(|w| w.set_algomode0(0)); + p.init(1).ivrr().write_value(2); + p.cr().modify(|w| w.set_crypen(true)); + p.cr().modify(|w| w.set_gcm_ccmph(3)); + let mut index = 0; + let end_index = Self::BLOCK_SIZE; + while index < end_index { + let mut in_word: [u8; 4] = [0; 4]; + in_word.copy_from_slice(&int_data[index..index + 4]); + p.din().write_value(u32::from_ne_bytes(in_word)); + index += 4; + } + for _ in 0..4 { + p.dout().read(); + } + } + } +} + +impl<'c> CipherSized for AesGmac<'c, { 128 / 8 }> {} +impl<'c> CipherSized for AesGmac<'c, { 192 / 8 }> {} +impl<'c> CipherSized for AesGmac<'c, { 256 / 8 }> {} +impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesGmac<'c, KEY_SIZE> {} + +// struct AesCcm<'c, const KEY_SIZE: usize> { +// iv: &'c [u8], +// key: &'c [u8; KEY_SIZE], +// aad_len: usize, +// payload_len: usize, +// } + +// impl<'c, const KEY_SIZE: usize> AesCcm<'c, KEY_SIZE> { +// pub fn new(&self, key: &[u8; KEY_SIZE], iv: &[u8], aad_len: usize, payload_len: usize) { +// if iv.len() > 13 { +// panic!("CCM IV length must be 13 bytes or less."); +// } +// self.key = key; +// self.iv = iv; +// self.aad_len = aad_len; +// self.payload_len = payload_len; +// } +// } + +// impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> { +// const BLOCK_SIZE: usize = AES_BLOCK_SIZE; + +// fn key(&self) -> &'c [u8] { +// self.key +// } + +// fn iv(&self) -> &'c [u8] { +// self.iv +// } + +// fn set_algomode(&self, p: &pac::cryp::Cryp) { +// p.cr().modify(|w| w.set_algomode0(1)); +// p.cr().modify(|w| w.set_algomode3(true)); +// } + +// fn init_phase(&self, p: &pac::cryp::Cryp) { +// todo!(); +// } +// } + +// impl<'c> CipherSized for AesCcm<'c, { 128 / 8 }> {} +// impl<'c> CipherSized for AesCcm<'c, { 192 / 8 }> {} +// impl<'c> CipherSized for AesCcm<'c, { 256 / 8 }> {} + /// Holds the state information for a cipher operation. /// Allows suspending/resuming of cipher operations. -pub struct Context<'c> { - algo: Algorithm, - mode: Mode, +pub struct Context<'c, C: Cipher<'c> + CipherSized> { + phantom_data: PhantomData<&'c C>, + cipher: &'c C, dir: Direction, last_block_processed: bool, aad_complete: bool, cr: u32, iv: [u32; 4], - key: &'c [u8], csgcmccm: [u32; 8], csgcm: [u32; 8], header_len: u64, payload_len: u64, } -/// Selects the encryption algorithm. -#[derive(PartialEq, Clone, Copy)] -pub enum Algorithm { - /// Advanced Encryption Standard - AES, - /// Data Encryption Standard - DES, - /// Triple-DES - TDES, -} - -/// Selects the cipher mode. -#[derive(PartialEq, Clone, Copy)] -pub enum Mode { - /// Electronic Codebook - ECB, - /// Cipher Block Chaining - CBC, - /// Counter Mode - CTR, - /// Galois Counter Mode - GCM, - /// Galois Message Authentication Code - GMAC, - /// Counter with CBC-MAC - CCM, -} - /// Selects whether the crypto processor operates in encryption or decryption mode. #[derive(PartialEq, Clone, Copy)] pub enum Direction { @@ -68,10 +394,6 @@ pub struct Cryp<'d, T: Instance> { _peripheral: PeripheralRef<'d, T>, } -/// Initialization vector of arbitrary length. -/// When an initialization vector is not needed, `None` may be supplied. -pub type InitVector<'v> = Option<&'v [u8]>; - impl<'d, T: Instance> Cryp<'d, T> { /// Create a new CRYP driver. pub fn new(peri: impl Peripheral
<P = T> + 'd) -> Self {
@@ -85,51 +407,31 @@ impl<'d, T: Instance> Cryp<'d, T> {
/// Key size must be 128, 192, or 256 bits.
/// An initialization vector must be supplied only when the selected cipher mode requires one.
/// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode.
- pub fn start<'c>(&self, key: &'c [u8], iv: InitVector, algo: Algorithm, mode: Mode, dir: Direction) -> Context<'c> {
- let mut ctx = Context {
- algo,
- mode,
+ pub fn start<'c, C: Cipher<'c> + CipherSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> {
+ let mut ctx: Context<'c, C> = Context {
dir,
last_block_processed: false,
cr: 0,
iv: [0; 4],
- key,
csgcmccm: [0; 8],
csgcm: [0; 8],
aad_complete: false,
header_len: 0,
payload_len: 0,
+ cipher: cipher,
+ phantom_data: PhantomData,
};
T::regs().cr().modify(|w| w.set_crypen(false));
- // Checks for correctness
- if algo == Algorithm::AES {
- let keylen = key.len() * 8;
- let ivlen;
- if let Some(iv) = iv {
- ivlen = iv.len() * 8;
- } else {
- ivlen = 0;
- }
- match keylen {
- 128 => T::regs().cr().modify(|w| w.set_keysize(0)),
- 192 => T::regs().cr().modify(|w| w.set_keysize(1)),
- 256 => T::regs().cr().modify(|w| w.set_keysize(2)),
- _ => panic!("Key length must be 128, 192, or 256 bits."),
- }
+ let key = ctx.cipher.key();
- if (mode == Mode::GCM) && (ivlen != 96) {
- panic!("IV length must be 96 bits for GCM.");
- } else if (mode == Mode::CBC) && (ivlen != 128) {
- panic!("IV length must be 128 bits for CBC.");
- } else if (mode == Mode::CCM) && (ivlen != 128) {
- panic!("IV length must be 128 bits for CCM.");
- } else if (mode == Mode::CTR) && (ivlen != 128) {
- panic!("IV length must be 128 bits for CTR.");
- } else if (mode == Mode::GMAC) && (ivlen != 96) {
- panic!("IV length must be 96 bits for GMAC.");
- }
+ if key.len() == (128 / 8) {
+ T::regs().cr().modify(|w| w.set_keysize(0));
+ } else if key.len() == (192 / 8) {
+ T::regs().cr().modify(|w| w.set_keysize(1));
+ } else if key.len() == (256 / 8) {
+ T::regs().cr().modify(|w| w.set_keysize(2));
}
self.load_key(key);
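
With the `CipherSized` bound in place, the key-length panic removed above moves into the type system. A minimal sketch of the effect, using the `AesCbc` constructor from this series (buffers are placeholders, not from the patch):

    let key = [0u8; 32]; // 256-bit key: `AesCbc<'_, 32>` implements `CipherSized`
    let iv = [0u8; 16];
    let cipher = AesCbc::new(&key, &iv);
    // A 160-bit key still constructs an `AesCbc<'_, 20>`, but no `CipherSized`
    // impl exists for that width, so `Cryp::start` rejects it at compile time
    // instead of panicking at runtime:
    // let bad = AesCbc::new(&[0u8; 20], &iv);
    // let ctx = cryp.start(&bad, Direction::Encrypt); // error: trait bound not satisfied
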
@@ -137,40 +439,9 @@ impl<'d, T: Instance> Cryp<'d, T> {
// Set data type to 8-bit. This will match software implementations.
T::regs().cr().modify(|w| w.set_datatype(2));
- self.prepare_key(&ctx);
+ ctx.cipher.prepare_key(&T::regs());
- if algo == Algorithm::AES {
- match mode {
- Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(4)),
- Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(5)),
- Mode::CTR => T::regs().cr().modify(|w| w.set_algomode0(6)),
- Mode::GCM => T::regs().cr().modify(|w| w.set_algomode0(0)),
- Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode0(0)),
- Mode::CCM => T::regs().cr().modify(|w| w.set_algomode0(1)),
- }
- match mode {
- Mode::ECB => T::regs().cr().modify(|w| w.set_algomode3(false)),
- Mode::CBC => T::regs().cr().modify(|w| w.set_algomode3(false)),
- Mode::CTR => T::regs().cr().modify(|w| w.set_algomode3(false)),
- Mode::GCM => T::regs().cr().modify(|w| w.set_algomode3(true)),
- Mode::GMAC => T::regs().cr().modify(|w| w.set_algomode3(true)),
- Mode::CCM => T::regs().cr().modify(|w| w.set_algomode3(true)),
- }
- } else if algo == Algorithm::DES {
- T::regs().cr().modify(|w| w.set_algomode3(false));
- match mode {
- Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(2)),
- Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(3)),
- _ => panic!("Only ECB and CBC modes are valid for DES."),
- }
- } else if algo == Algorithm::TDES {
- T::regs().cr().modify(|w| w.set_algomode3(false));
- match mode {
- Mode::ECB => T::regs().cr().modify(|w| w.set_algomode0(0)),
- Mode::CBC => T::regs().cr().modify(|w| w.set_algomode0(1)),
- _ => panic!("Only ECB and CBC modes are valid for TDES."),
- }
- }
+ ctx.cipher.set_algomode(&T::regs());
// Set encrypt/decrypt
if dir == Direction::Encrypt {
@@ -180,38 +451,27 @@ impl<'d, T: Instance> Cryp<'d, T> {
}
// Load the IV into the registers.
- if let Some(iv) = iv {
- let mut full_iv: [u8; 16] = [0; 16];
- full_iv[0..iv.len()].copy_from_slice(iv);
-
- if (mode == Mode::GCM) || (mode == Mode::GMAC) {
- full_iv[15] = 2;
- }
-
- let mut iv_idx = 0;
- let mut iv_word: [u8; 4] = [0; 4];
- iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
- iv_idx += 4;
- T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word));
- iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
- iv_idx += 4;
- T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word));
- iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
- iv_idx += 4;
- T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word));
- iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
- T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word));
- }
+ let iv = ctx.cipher.iv();
+ let mut full_iv: [u8; 16] = [0; 16];
+ full_iv[0..iv.len()].copy_from_slice(iv);
+ let mut iv_idx = 0;
+ let mut iv_word: [u8; 4] = [0; 4];
+ iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
+ iv_idx += 4;
+ T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word));
+ iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
+ iv_idx += 4;
+ T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word));
+ iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
+ iv_idx += 4;
+ T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word));
+ iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
+ T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word));
// Flush in/out FIFOs
T::regs().cr().modify(|w| w.fflush());
- if mode == Mode::GCM {
- // GCM init phase
- T::regs().cr().modify(|w| w.set_gcm_ccmph(0));
- T::regs().cr().modify(|w| w.set_crypen(true));
- while T::regs().cr().read().crypen() {}
- }
+ ctx.cipher.init_phase(&T::regs());
self.store_context(&mut ctx);
@@ -224,42 +484,38 @@ impl<'d, T: Instance> Cryp<'d, T> {
/// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`.
/// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block.
/// When supplying the last block of AAD, `last_aad_block` must be `true`.
- pub fn aad_blocking(&self, ctx: &mut Context, aad: &[u8], last_aad_block: bool) {
+ pub fn aad_blocking<'c, C: Cipher<'c> + CipherSized + CipherAuthenticated>(
+ &self,
+ ctx: &mut Context<'c, C>,
+ aad: &[u8],
+ last_aad_block: bool,
+ ) {
self.load_context(ctx);
- let block_size;
- if ctx.algo == Algorithm::DES {
- block_size = DES_BLOCK_SIZE;
- } else {
- block_size = AES_BLOCK_SIZE;
- }
- let last_block_remainder = aad.len() % block_size;
+ let last_block_remainder = aad.len() % C::BLOCK_SIZE;
// Perform checks for correctness.
if ctx.aad_complete {
panic!("Cannot update AAD after calling 'update'!")
}
- if (ctx.mode != Mode::GCM) && (ctx.mode != Mode::GMAC) && (ctx.mode != Mode::CCM) {
- panic!("Associated data only valid for GCM, GMAC, and CCM modes.")
- }
if !last_aad_block {
if last_block_remainder != 0 {
- panic!("Input length must be a multiple of {} bytes.", block_size);
+ panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE);
}
}
ctx.header_len += aad.len() as u64;
- // GCM header phase
+ // Header phase
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(1));
T::regs().cr().modify(|w| w.set_crypen(true));
// Load data into core, block by block.
- let num_full_blocks = aad.len() / block_size;
+ let num_full_blocks = aad.len() / C::BLOCK_SIZE;
for block in 0..num_full_blocks {
- let mut index = block * block_size;
- let end_index = index + block_size;
+ let mut index = block * C::BLOCK_SIZE;
+ let end_index = index + C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
@@ -276,7 +532,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
last_block[..last_block_remainder].copy_from_slice(&aad[aad.len() - last_block_remainder..aad.len()]);
let mut index = 0;
- let end_index = block_size;
+ let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
@@ -307,16 +563,16 @@ impl<'d, T: Instance> Cryp<'d, T> {
/// Data must be a multiple of the block size (128 bits for AES, 64 bits for DES) for CBC and ECB modes.
/// Padding or ciphertext stealing must be managed by the application for these modes.
/// Data must also be a multiple of block size unless `last_block` is `true`.
- pub fn payload_blocking(&self, ctx: &mut Context, input: &[u8], output: &mut [u8], last_block: bool) {
+ pub fn payload_blocking<'c, C: Cipher<'c> + CipherSized>(
+ &self,
+ ctx: &mut Context<'c, C>,
+ input: &[u8],
+ output: &mut [u8],
+ last_block: bool,
+ ) {
self.load_context(ctx);
- let block_size;
- if ctx.algo == Algorithm::DES {
- block_size = DES_BLOCK_SIZE;
- } else {
- block_size = AES_BLOCK_SIZE;
- }
- let last_block_remainder = input.len() % block_size;
+ let last_block_remainder = input.len() % C::BLOCK_SIZE;
// Perform checks for correctness.
if !ctx.aad_complete && ctx.header_len > 0 {
@@ -328,9 +584,6 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().cr().modify(|w| w.fflush());
T::regs().cr().modify(|w| w.set_crypen(true));
}
- if ctx.mode == Mode::GMAC {
- panic!("GMAC works on header data only. Do not call this function for GMAC.");
- }
if ctx.last_block_processed {
panic!("The last block has already been processed!");
}
@@ -339,24 +592,23 @@ impl<'d, T: Instance> Cryp<'d, T> {
}
if !last_block {
if last_block_remainder != 0 {
- panic!("Input length must be a multiple of {} bytes.", block_size);
+ panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE);
}
}
- if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) {
+ if C::REQUIRES_PADDING {
if last_block_remainder != 0 {
- panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", block_size);
+ panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE);
}
}
-
if last_block {
ctx.last_block_processed = true;
}
// Load data into core, block by block.
- let num_full_blocks = input.len() / block_size;
+ let num_full_blocks = input.len() / C::BLOCK_SIZE;
for block in 0..num_full_blocks {
- let mut index = block * block_size;
- let end_index = index + block_size;
+ let mut index = block * C::BLOCK_SIZE;
+ let end_index = index + C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
@@ -364,8 +616,8 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
- let mut index = block * block_size;
- let end_index = index + block_size;
+ let mut index = block * C::BLOCK_SIZE;
+ let end_index = index + C::BLOCK_SIZE;
// Block until there is output to read.
while !T::regs().sr().read().ofne() {}
// Read block out
@@ -378,21 +630,13 @@ impl<'d, T: Instance> Cryp<'d, T> {
// Handle the final block, which is incomplete.
if last_block_remainder > 0 {
- if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt {
- //Handle special GCM partial block process.
- T::regs().cr().modify(|w| w.set_crypen(false));
- T::regs().cr().modify(|w| w.set_algomode3(false));
- T::regs().cr().modify(|w| w.set_algomode0(6));
- let iv1r = T::regs().csgcmccmr(7).read() - 1;
- T::regs().init(1).ivrr().write_value(iv1r);
- T::regs().cr().modify(|w| w.set_crypen(true));
- }
+ ctx.cipher.pre_final_block(&T::regs());
let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]);
let mut index = 0;
- let end_index = block_size;
+ let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
@@ -401,7 +645,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
index += 4;
}
let mut index = 0;
- let end_index = block_size;
+ let end_index = C::BLOCK_SIZE;
// Block until there is output to read.
while !T::regs().sr().read().ofne() {}
// Read block out
@@ -416,41 +660,19 @@ impl<'d, T: Instance> Cryp<'d, T> {
output[output_len - last_block_remainder..output_len]
.copy_from_slice(&intermediate_data[0..last_block_remainder]);
- if ctx.mode == Mode::GCM && ctx.dir == Direction::Encrypt {
- //Handle special GCM partial block process.
- T::regs().cr().modify(|w| w.set_crypen(false));
- T::regs().cr().write(|w| w.set_algomode3(true));
- T::regs().cr().write(|w| w.set_algomode0(0));
- T::regs().init(1).ivrr().write_value(2);
- T::regs().cr().modify(|w| w.set_crypen(true));
- T::regs().cr().modify(|w| w.set_gcm_ccmph(3));
- let mut index = 0;
- let end_index = block_size;
- while index < end_index {
- let mut in_word: [u8; 4] = [0; 4];
- in_word.copy_from_slice(&intermediate_data[index..index + 4]);
- T::regs().din().write_value(u32::from_ne_bytes(in_word));
- index += 4;
- }
- for _ in 0..4 {
- T::regs().dout().read();
- }
- }
+ ctx.cipher.post_final_block(&T::regs(), ctx.dir, &intermediate_data);
}
ctx.payload_len += input.len() as u64;
}
/// This function only needs to be called for GCM, CCM, and GMAC modes to
- /// generate an authentication tag. Calling this function on any other mode
- /// does nothing except consumes the context. A buffer for the authentication
- /// tag must be supplied.
- pub fn finish_blocking(&self, mut ctx: Context, tag: &mut [u8; 16]) {
- // Just consume the context if called for any other mode.
- if (ctx.mode != Mode::GCM) || (ctx.mode != Mode::CCM) || (ctx.mode != Mode::GMAC) {
- return;
- }
-
+ /// generate an authentication tag.
+ pub fn finish_blocking<'c, C: Cipher<'c> + CipherSized + CipherAuthenticated>(
+ &self,
+ mut ctx: Context<'c, C>,
+ tag: &mut [u8; 16],
+ ) {
self.load_context(&mut ctx);
T::regs().cr().modify(|w| w.set_crypen(false));
@@ -477,17 +699,6 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().cr().modify(|w| w.set_crypen(false));
}
- fn prepare_key(&self, ctx: &Context) {
- if ctx.algo == Algorithm::AES && ctx.dir == Direction::Decrypt {
- if (ctx.mode == Mode::ECB) || (ctx.mode == Mode::CBC) {
- T::regs().cr().modify(|w| w.set_algomode0(7));
- T::regs().cr().modify(|w| w.set_algomode3(false));
- T::regs().cr().modify(|w| w.set_crypen(true));
- while T::regs().sr().read().busy() {}
- }
- }
- }
-
fn load_key(&self, key: &[u8]) {
// Load the key into the registers.
let mut keyidx = 0;
@@ -524,7 +735,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword));
}
- fn store_context(&self, ctx: &mut Context) {
+ fn store_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &mut Context<'c, C>) {
// Wait for data block processing to finish.
while !T::regs().sr().read().ifem() {}
while T::regs().sr().read().ofne() {}
@@ -545,7 +756,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
}
}
- fn load_context(&self, ctx: &Context) {
+ fn load_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &Context<'c, C>) {
// Reload state registers.
T::regs().cr().write(|w| w.0 = ctx.cr);
T::regs().init(0).ivlr().write_value(ctx.iv[0]);
@@ -556,10 +767,10 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().csgcmccmr(i).write_value(ctx.csgcmccm[i]);
T::regs().csgcmr(i).write_value(ctx.csgcm[i]);
}
- self.load_key(ctx.key);
+ self.load_key(ctx.cipher.key());
// Prepare key if applicable.
- self.prepare_key(ctx);
+ ctx.cipher.prepare_key(&T::regs());
T::regs().cr().write(|w| w.0 = ctx.cr);
// Enable crypto processor.
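Taken together, this patch replaces the `algo`/`mode` parameters with a cipher object that carries its own key, IV, and register configuration. A minimal usage sketch of the resulting blocking API (hedged: the `AesGcm::new(&key, &iv)` constructor and the `cryp` driver handle are assumed here, not shown in this diff):

    let key: [u8; 16] = [0; 16];
    let iv: [u8; 12] = [0; 12];
    let gcm = AesGcm::new(&key, &iv); // assumed constructor
    let mut ctx = cryp.start(&gcm, Direction::Encrypt);
    cryp.aad_blocking(&mut ctx, aad, true); // all AAD before any payload
    cryp.payload_blocking(&mut ctx, plaintext, &mut ciphertext, true);
    let mut tag = [0u8; 16];
    cryp.finish_blocking(ctx, &mut tag); // authenticated modes only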
From 690b2118c6fdad88bf1e595b6a0c0afdb0583d28 Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 20 Feb 2024 11:54:39 -0500
Subject: [PATCH 07/23] CCM mode functional.
---
embassy-stm32/src/cryp/mod.rs | 372 ++++++++++++++++++++++++++--------
1 file changed, 293 insertions(+), 79 deletions(-)
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index 29c1db12e..fe248def1 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -1,6 +1,6 @@
//! Crypto Accelerator (CRYP)
+use core::cmp::min;
use core::marker::PhantomData;
-
use embassy_hal_internal::{into_ref, PeripheralRef};
use crate::pac;
@@ -21,7 +21,7 @@ pub trait Cipher<'c> {
const REQUIRES_PADDING: bool = false;
/// Returns the symmetric key.
- fn key(&self) -> &'c [u8];
+ fn key(&self) -> &[u8];
/// Returns the initialization vector.
fn iv(&self) -> &[u8];
@@ -36,10 +36,25 @@ pub trait Cipher<'c> {
fn init_phase(&self, _p: &pac::cryp::Cryp) {}
/// Called prior to processing the last data block for cipher-specific operations.
- fn pre_final_block(&self, _p: &pac::cryp::Cryp) {}
+ fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction) -> [u32; 4] {
+ return [0; 4];
+ }
/// Called after processing the last data block for cipher-specific operations.
- fn post_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction, _int_data: &[u8; AES_BLOCK_SIZE]) {}
+ fn post_final_block(
+ &self,
+ _p: &pac::cryp::Cryp,
+ _dir: Direction,
+ _int_data: &[u8; AES_BLOCK_SIZE],
+ _temp1: [u32; 4],
+ _padding_mask: [u8; 16],
+ ) {
+ }
+
+ /// Called prior to processing the first associated data block for cipher-specific operations.
+ fn get_header_block(&self) -> &[u8] {
+ return [0; 0].as_slice();
+ }
}
/// This trait enables restriction of ciphers to specific key sizes.
@@ -204,17 +219,27 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
while p.cr().read().crypen() {}
}
- fn pre_final_block(&self, p: &pac::cryp::Cryp) {
+ fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
//Handle special GCM partial block process.
- p.cr().modify(|w| w.set_crypen(false));
- p.cr().modify(|w| w.set_algomode3(false));
- p.cr().modify(|w| w.set_algomode0(6));
- let iv1r = p.csgcmccmr(7).read() - 1;
- p.init(1).ivrr().write_value(iv1r);
- p.cr().modify(|w| w.set_crypen(true));
+ if dir == Direction::Encrypt {
+ p.cr().modify(|w| w.set_crypen(false));
+ p.cr().modify(|w| w.set_algomode3(false));
+ p.cr().modify(|w| w.set_algomode0(6));
+ let iv1r = p.csgcmccmr(7).read() - 1;
+ p.init(1).ivrr().write_value(iv1r);
+ p.cr().modify(|w| w.set_crypen(true));
+ }
+ [0; 4]
}
- fn post_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, int_data: &[u8; AES_BLOCK_SIZE]) {
+ fn post_final_block(
+ &self,
+ p: &pac::cryp::Cryp,
+ dir: Direction,
+ int_data: &[u8; AES_BLOCK_SIZE],
+ _temp1: [u32; 4],
+ _padding_mask: [u8; 16],
+ ) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
@@ -281,17 +306,27 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
while p.cr().read().crypen() {}
}
- fn pre_final_block(&self, p: &pac::cryp::Cryp) {
+ fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
//Handle special GCM partial block process.
- p.cr().modify(|w| w.set_crypen(false));
- p.cr().modify(|w| w.set_algomode3(false));
- p.cr().modify(|w| w.set_algomode0(6));
- let iv1r = p.csgcmccmr(7).read() - 1;
- p.init(1).ivrr().write_value(iv1r);
- p.cr().modify(|w| w.set_crypen(true));
+ if dir == Direction::Encrypt {
+ p.cr().modify(|w| w.set_crypen(false));
+ p.cr().modify(|w| w.set_algomode3(false));
+ p.cr().modify(|w| w.set_algomode0(6));
+ let iv1r = p.csgcmccmr(7).read() - 1;
+ p.init(1).ivrr().write_value(iv1r);
+ p.cr().modify(|w| w.set_crypen(true));
+ }
+ [0; 4]
}
- fn post_final_block(&self, p: &pac::cryp::Cryp, dir: Direction, int_data: &[u8; AES_BLOCK_SIZE]) {
+ fn post_final_block(
+ &self,
+ p: &pac::cryp::Cryp,
+ dir: Direction,
+ int_data: &[u8; AES_BLOCK_SIZE],
+ _temp1: [u32; 4],
+ _padding_mask: [u8; 16],
+ ) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
@@ -320,49 +355,180 @@ impl<'c> CipherSized for AesGmac<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesGmac<'c, { 256 / 8 }> {}
impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesGmac<'c, KEY_SIZE> {}
-// struct AesCcm<'c, const KEY_SIZE: usize> {
-// iv: &'c [u8],
-// key: &'c [u8; KEY_SIZE],
-// aad_len: usize,
-// payload_len: usize,
-// }
+pub struct AesCcm<'c, const KEY_SIZE: usize> {
+ key: &'c [u8; KEY_SIZE],
+ aad_header: [u8; 6],
+ aad_header_len: usize,
+ block0: [u8; 16],
+ ctr: [u8; 16],
+}
-// impl<'c, const KEY_SIZE: usize> AesCcm<'c, KEY_SIZE> {
-// pub fn new(&self, key: &[u8; KEY_SIZE], iv: &[u8], aad_len: usize, payload_len: usize) {
-// if iv.len() > 13 {
-// panic!("CCM IV length must be 13 bytes or less.");
-// }
-// self.key = key;
-// self.iv = iv;
-// self.aad_len = aad_len;
-// self.payload_len = payload_len;
-// }
-// }
+impl<'c, const KEY_SIZE: usize> AesCcm<'c, KEY_SIZE> {
+ pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8], aad_len: usize, payload_len: usize, tag_len: u8) -> Self {
+ if (iv.len()) > 13 || (iv.len() < 7) {
+ panic!("CCM IV length must be 7-13 bytes.");
+ }
+ if (tag_len < 4) || (tag_len > 16) {
+ panic!("Tag length must be between 4 and 16 bytes.");
+ }
+ if tag_len % 2 > 0 {
+ panic!("Tag length must be a multiple of 2 bytes.");
+ }
-// impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
-// const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
+ let mut aad_header: [u8; 6] = [0; 6];
+ let mut aad_header_len = 0;
+ let mut block0: [u8; 16] = [0; 16];
+ if aad_len != 0 {
+ if aad_len < 65280 {
+ aad_header[0] = (aad_len >> 8) as u8 & 0xFF;
+ aad_header[1] = aad_len as u8 & 0xFF;
+ aad_header_len = 2;
+ } else {
+ aad_header[0] = 0xFF;
+ aad_header[1] = 0xFE;
+ let aad_len_bytes: [u8; 4] = aad_len.to_be_bytes();
+ aad_header[2] = aad_len_bytes[0];
+ aad_header[3] = aad_len_bytes[1];
+ aad_header[4] = aad_len_bytes[2];
+ aad_header[5] = aad_len_bytes[3];
+ aad_header_len = 6;
+ }
+ }
+ let total_aad_len = aad_header_len + aad_len;
+ let mut aad_padding_len = 16 - (total_aad_len % 16);
+ if aad_padding_len == 16 {
+ aad_padding_len = 0;
+ }
+ aad_header_len += aad_padding_len;
+ let total_aad_len_padded = aad_header_len + aad_len;
+ if total_aad_len_padded > 0 {
+ block0[0] = 0x40;
+ }
+ block0[0] |= (((tag_len - 2) >> 1) & 0x07) << 3;
+ block0[0] |= ((15 - (iv.len() as u8)) - 1) & 0x07;
+ block0[1..1 + iv.len()].copy_from_slice(iv);
+ let payload_len_bytes: [u8; 4] = payload_len.to_be_bytes();
+ if iv.len() <= 11 {
+ block0[12] = payload_len_bytes[0];
+ } else if payload_len_bytes[0] > 0 {
+ panic!("Message is too large for given IV size.");
+ }
+ if iv.len() <= 12 {
+ block0[13] = payload_len_bytes[1];
+ } else if payload_len_bytes[1] > 0 {
+ panic!("Message is too large for given IV size.");
+ }
+ block0[14] = payload_len_bytes[2];
+ block0[15] = payload_len_bytes[3];
+ let mut ctr: [u8; 16] = [0; 16];
+ ctr[0] = block0[0] & 0x07;
+ ctr[1..1 + iv.len()].copy_from_slice(&block0[1..1 + iv.len()]);
+ ctr[15] = 0x01;
-// fn key(&self) -> &'c [u8] {
-// self.key
-// }
+ return Self {
+ key: key,
+ aad_header: aad_header,
+ aad_header_len: aad_header_len,
+ block0: block0,
+ ctr: ctr,
+ };
+ }
+}
-// fn iv(&self) -> &'c [u8] {
-// self.iv
-// }
+impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
+ const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
-// fn set_algomode(&self, p: &pac::cryp::Cryp) {
-// p.cr().modify(|w| w.set_algomode0(1));
-// p.cr().modify(|w| w.set_algomode3(true));
-// }
+ fn key(&self) -> &'c [u8] {
+ self.key
+ }
-// fn init_phase(&self, p: &pac::cryp::Cryp) {
-// todo!();
-// }
-// }
+ fn iv(&self) -> &[u8] {
+ self.ctr.as_slice()
+ }
-// impl<'c> CipherSized for AesCcm<'c, { 128 / 8 }> {}
-// impl<'c> CipherSized for AesCcm<'c, { 192 / 8 }> {}
-// impl<'c> CipherSized for AesCcm<'c, { 256 / 8 }> {}
+ fn set_algomode(&self, p: &pac::cryp::Cryp) {
+ p.cr().modify(|w| w.set_algomode0(1));
+ p.cr().modify(|w| w.set_algomode3(true));
+ }
+
+ fn init_phase(&self, p: &pac::cryp::Cryp) {
+ p.cr().modify(|w| w.set_gcm_ccmph(0));
+
+ let mut index = 0;
+ let end_index = index + Self::BLOCK_SIZE;
+ // Write block in
+ while index < end_index {
+ let mut in_word: [u8; 4] = [0; 4];
+ in_word.copy_from_slice(&self.block0[index..index + 4]);
+ p.din().write_value(u32::from_ne_bytes(in_word));
+ index += 4;
+ }
+ p.cr().modify(|w| w.set_crypen(true));
+ while p.cr().read().crypen() {}
+ }
+
+ fn get_header_block(&self) -> &[u8] {
+ return &self.aad_header[0..self.aad_header_len];
+ }
+
+ fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
+ //Handle special CCM partial block process.
+ let mut temp1 = [0; 4];
+ if dir == Direction::Decrypt {
+ p.cr().modify(|w| w.set_crypen(false));
+ let iv1temp = p.init(1).ivrr().read();
+ temp1[0] = p.csgcmccmr(0).read();
+ temp1[1] = p.csgcmccmr(1).read();
+ temp1[2] = p.csgcmccmr(2).read();
+ temp1[3] = p.csgcmccmr(3).read();
+ p.init(1).ivrr().write_value(iv1temp);
+ p.cr().modify(|w| w.set_algomode3(false));
+ p.cr().modify(|w| w.set_algomode0(6));
+ p.cr().modify(|w| w.set_crypen(true));
+ }
+ return temp1;
+ }
+
+ fn post_final_block(
+ &self,
+ p: &pac::cryp::Cryp,
+ dir: Direction,
+ int_data: &[u8; AES_BLOCK_SIZE],
+ temp1: [u32; 4],
+ padding_mask: [u8; 16],
+ ) {
+ if dir == Direction::Decrypt {
+ //Handle special CCM partial block process.
+ let mut intdata_o: [u32; 4] = [0; 4];
+ for i in 0..intdata_o.len() {
+ intdata_o[i] = p.dout().read();
+ }
+ let mut temp2 = [0; 4];
+ temp2[0] = p.csgcmccmr(0).read();
+ temp2[1] = p.csgcmccmr(1).read();
+ temp2[2] = p.csgcmccmr(2).read();
+ temp2[3] = p.csgcmccmr(3).read();
+ p.cr().write(|w| w.set_algomode3(true));
+ p.cr().write(|w| w.set_algomode0(1));
+ p.cr().modify(|w| w.set_gcm_ccmph(3));
+ // Header phase
+ p.cr().modify(|w| w.set_gcm_ccmph(1));
+ let mut in_data: [u32; 4] = [0; 4];
+ for i in 0..in_data.len() {
+ let mut mask_bytes: [u8; 4] = [0; 4];
+ mask_bytes.copy_from_slice(&padding_mask[(i * 4)..(i * 4) + 4]);
+ let mask_word = u32::from_le_bytes(mask_bytes);
+ in_data[i] = intdata_o[i] & mask_word;
+ in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i];
+ }
+ }
+ }
+}
+
+impl<'c> CipherSized for AesCcm<'c, { 128 / 8 }> {}
+impl<'c> CipherSized for AesCcm<'c, { 192 / 8 }> {}
+impl<'c> CipherSized for AesCcm<'c, { 256 / 8 }> {}
+impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesCcm<'c, KEY_SIZE> {}
/// Holds the state information for a cipher operation.
/// Allows suspending/resuming of cipher operations.
@@ -371,6 +537,7 @@ pub struct Context<'c, C: Cipher<'c> + CipherSized> {
cipher: &'c C,
dir: Direction,
last_block_processed: bool,
+ header_processed: bool,
aad_complete: bool,
cr: u32,
iv: [u32; 4],
@@ -378,6 +545,8 @@ pub struct Context<'c, C: Cipher<'c> + CipherSized> {
csgcm: [u32; 8],
header_len: u64,
payload_len: u64,
+ aad_buffer: [u8; 16],
+ aad_buffer_len: usize,
}
/// Selects whether the crypto processor operates in encryption or decryption mode.
@@ -420,6 +589,9 @@ impl<'d, T: Instance> Cryp<'d, T> {
payload_len: 0,
cipher: cipher,
phantom_data: PhantomData,
+ header_processed: false,
+ aad_buffer: [0; 16],
+ aad_buffer_len: 0,
};
T::regs().cr().modify(|w| w.set_crypen(false));
@@ -492,16 +664,9 @@ impl<'d, T: Instance> Cryp<'d, T> {
) {
self.load_context(ctx);
- let last_block_remainder = aad.len() % C::BLOCK_SIZE;
-
// Perform checks for correctness.
if ctx.aad_complete {
- panic!("Cannot update AAD after calling 'update'!")
- }
- if !last_aad_block {
- if last_block_remainder != 0 {
- panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE);
- }
+ panic!("Cannot update AAD after starting payload!")
}
ctx.header_len += aad.len() as u64;
@@ -511,11 +676,49 @@ impl<'d, T: Instance> Cryp<'d, T> {
T::regs().cr().modify(|w| w.set_gcm_ccmph(1));
T::regs().cr().modify(|w| w.set_crypen(true));
- // Load data into core, block by block.
- let num_full_blocks = aad.len() / C::BLOCK_SIZE;
- for block in 0..num_full_blocks {
- let mut index = block * C::BLOCK_SIZE;
- let end_index = index + C::BLOCK_SIZE;
+ // First write the header B1 block if not yet written.
+ if !ctx.header_processed {
+ ctx.header_processed = true;
+ let header = ctx.cipher.get_header_block();
+ ctx.aad_buffer[0..header.len()].copy_from_slice(header);
+ ctx.aad_buffer_len += header.len();
+ }
+
+ // Fill the header block to make a full block.
+ let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len);
+ ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]);
+ ctx.aad_buffer_len += len_to_copy;
+ ctx.aad_buffer[ctx.aad_buffer_len..].fill(0);
+ let mut aad_len_remaining = aad.len() - len_to_copy;
+
+ if ctx.aad_buffer_len < C::BLOCK_SIZE {
+ // The buffer isn't full and this is the last buffer, so process it as is (already padded).
+ if last_aad_block {
+ let mut index = 0;
+ let end_index = C::BLOCK_SIZE;
+ // Write block in
+ while index < end_index {
+ let mut in_word: [u8; 4] = [0; 4];
+ in_word.copy_from_slice(&aad[index..index + 4]);
+ T::regs().din().write_value(u32::from_ne_bytes(in_word));
+ index += 4;
+ }
+ // Block until input FIFO is empty.
+ while !T::regs().sr().read().ifem() {}
+
+ // Switch to payload phase.
+ ctx.aad_complete = true;
+ T::regs().cr().modify(|w| w.set_crypen(false));
+ T::regs().cr().modify(|w| w.set_gcm_ccmph(2));
+ T::regs().cr().modify(|w| w.fflush());
+ } else {
+ // Just return because we don't yet have a full block to process.
+ return;
+ }
+ } else {
+ // Load the full block from the buffer.
+ let mut index = 0;
+ let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
@@ -527,20 +730,26 @@ impl<'d, T: Instance> Cryp<'d, T> {
while !T::regs().sr().read().ifem() {}
}
- // Handle the final block, which is incomplete.
- if last_block_remainder > 0 {
- let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
- last_block[..last_block_remainder].copy_from_slice(&aad[aad.len() - last_block_remainder..aad.len()]);
- let mut index = 0;
- let end_index = C::BLOCK_SIZE;
+ // Handle a partial block that is passed in.
+ ctx.aad_buffer_len = 0;
+ let leftovers = aad_len_remaining % C::BLOCK_SIZE;
+ ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]);
+ aad_len_remaining -= leftovers;
+ assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0);
+
+ // Load full data blocks into core.
+ let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE;
+ for block in 0..num_full_blocks {
+ let mut index = len_to_copy + (block * C::BLOCK_SIZE);
+ let end_index = index + C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
- in_word.copy_from_slice(&last_block[index..index + 4]);
+ in_word.copy_from_slice(&aad[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
- // Block until input FIFO is empty
+ // Block until input FIFO is empty.
while !T::regs().sr().read().ifem() {}
}
@@ -630,7 +839,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
// Handle the final block, which is incomplete.
if last_block_remainder > 0 {
- ctx.cipher.pre_final_block(&T::regs());
+ let temp1 = ctx.cipher.pre_final_block(&T::regs(), ctx.dir);
let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
@@ -660,10 +869,15 @@ impl<'d, T: Instance> Cryp<'d, T> {
output[output_len - last_block_remainder..output_len]
.copy_from_slice(&intermediate_data[0..last_block_remainder]);
- ctx.cipher.post_final_block(&T::regs(), ctx.dir, &intermediate_data);
+ let mut mask: [u8; 16] = [0; 16];
+ mask[..last_block_remainder].fill(0xFF);
+ ctx.cipher
+ .post_final_block(&T::regs(), ctx.dir, &intermediate_data, temp1, mask);
}
ctx.payload_len += input.len() as u64;
+
+ self.store_context(ctx);
}
/// This function only needs to be called for GCM, CCM, and GMAC modes to
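The densest new code in this patch is the CCM B0 block and initial counter construction in `AesCcm::new`, which follows NIST SP 800-38C. A standalone sketch of the flags-byte arithmetic (an illustrative helper, not part of the patch):

    /// Build the CCM B0 flags byte per NIST SP 800-38C, appendix A.
    /// `t` is the tag length in bytes (4..=16, even), `n` the nonce length (7..=13).
    fn ccm_b0_flags(has_aad: bool, t: u8, n: u8) -> u8 {
        let q = 15 - n; // bytes left over for the message-length field
        let mut flags = 0u8;
        if has_aad {
            flags |= 0x40; // Adata bit
        }
        flags |= (((t - 2) >> 1) & 0x07) << 3; // (t - 2) / 2 in bits 5..3
        flags |= (q - 1) & 0x07; // q - 1 in bits 2..0
        flags
    }

The counter blocks reuse the nonce with the flags byte reduced to `q - 1` (hence `ctr[0] = block0[0] & 0x07` above), and this driver preloads the counter field with 1 so the hardware starts at the first payload counter block.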
From 1e21b758f795b5cc8a2331aacbc2a9a39bb7a7fb Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 20 Feb 2024 14:27:37 -0500
Subject: [PATCH 08/23] Corrected GCM tag generation.
---
embassy-stm32/src/cryp/mod.rs | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index fe248def1..81446e39e 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -45,7 +45,7 @@ pub trait Cipher<'c> {
&self,
_p: &pac::cryp::Cryp,
_dir: Direction,
- _int_data: &[u8; AES_BLOCK_SIZE],
+ _int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
_padding_mask: [u8; 16],
) {
@@ -236,16 +236,18 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
&self,
p: &pac::cryp::Cryp,
dir: Direction,
- int_data: &[u8; AES_BLOCK_SIZE],
+ int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
- _padding_mask: [u8; 16],
+ padding_mask: [u8; AES_BLOCK_SIZE],
) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
- p.cr().write(|w| w.set_algomode3(true));
- p.cr().write(|w| w.set_algomode0(0));
- p.init(1).ivrr().write_value(2);
+ p.cr().modify(|w| w.set_algomode3(true));
+ p.cr().modify(|w| w.set_algomode0(0));
+ for i in 0..AES_BLOCK_SIZE {
+ int_data[i] = int_data[i] & padding_mask[i];
+ }
p.cr().modify(|w| w.set_crypen(true));
p.cr().modify(|w| w.set_gcm_ccmph(3));
let mut index = 0;
@@ -323,7 +325,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
&self,
p: &pac::cryp::Cryp,
dir: Direction,
- int_data: &[u8; AES_BLOCK_SIZE],
+ int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
_padding_mask: [u8; 16],
) {
@@ -493,7 +495,7 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
&self,
p: &pac::cryp::Cryp,
dir: Direction,
- int_data: &[u8; AES_BLOCK_SIZE],
+ int_data: &mut [u8; AES_BLOCK_SIZE],
temp1: [u32; 4],
padding_mask: [u8; 16],
) {
@@ -872,7 +874,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
let mut mask: [u8; 16] = [0; 16];
mask[..last_block_remainder].fill(0xFF);
ctx.cipher
- .post_final_block(&T::regs(), ctx.dir, &intermediate_data, temp1, mask);
+ .post_final_block(&T::regs(), ctx.dir, &mut intermediate_data, temp1, mask);
}
ctx.payload_len += input.len() as u64;
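Beyond masking the intermediate data with `padding_mask`, the `write` → `modify` changes here are load-bearing: a PAC-style `write` starts from the register's reset value, so writing only the ALGOMODE bits would silently clear fields such as DATATYPE and KEYSIZE configured during `start`, while `modify` performs a read-modify-write that preserves them. A toy model of the distinction (not the actual PAC API):

    // Control register with DATATYPE in bits 7:6 and ALGOMODE0 in bits 2:0.
    #[derive(Clone, Copy)]
    struct Cr(u32);

    impl Cr {
        // `write`-style: every field not named is reset to zero.
        fn write_algomode0(v: u32) -> Cr {
            Cr(v & 0x7)
        }
        // `modify`-style: read, change one field, write back.
        fn modify_algomode0(self, v: u32) -> Cr {
            Cr((self.0 & !0x7) | (v & 0x7))
        }
    }

    let cr = Cr(0b10 << 6); // DATATYPE = 8-bit, set in start()
    assert_eq!(Cr::write_algomode0(0).0, 0); // DATATYPE lost
    assert_eq!(cr.modify_algomode0(0).0, 0b10 << 6); // DATATYPE kept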
From f64a62149e423f6fdb643f7343d971eedc4a3a12 Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 20 Feb 2024 15:26:31 -0500
Subject: [PATCH 09/23] Corrected CCM partial block ops.
---
embassy-stm32/src/cryp/mod.rs | 46 ++++++++++++++++++-----------------
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index 81446e39e..634c85883 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -327,14 +327,16 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
dir: Direction,
int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
- _padding_mask: [u8; 16],
+ padding_mask: [u8; AES_BLOCK_SIZE],
) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
- p.cr().write(|w| w.set_algomode3(true));
- p.cr().write(|w| w.set_algomode0(0));
- p.init(1).ivrr().write_value(2);
+ p.cr().modify(|w| w.set_algomode3(true));
+ p.cr().modify(|w| w.set_algomode0(0));
+ for i in 0..AES_BLOCK_SIZE {
+ int_data[i] = int_data[i] & padding_mask[i];
+ }
p.cr().modify(|w| w.set_crypen(true));
p.cr().modify(|w| w.set_gcm_ccmph(3));
let mut index = 0;
@@ -479,10 +481,10 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
if dir == Direction::Decrypt {
p.cr().modify(|w| w.set_crypen(false));
let iv1temp = p.init(1).ivrr().read();
- temp1[0] = p.csgcmccmr(0).read();
- temp1[1] = p.csgcmccmr(1).read();
- temp1[2] = p.csgcmccmr(2).read();
- temp1[3] = p.csgcmccmr(3).read();
+ temp1[0] = p.csgcmccmr(0).read().swap_bytes();
+ temp1[1] = p.csgcmccmr(1).read().swap_bytes();
+ temp1[2] = p.csgcmccmr(2).read().swap_bytes();
+ temp1[3] = p.csgcmccmr(3).read().swap_bytes();
p.init(1).ivrr().write_value(iv1temp);
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_algomode0(6));
@@ -501,27 +503,27 @@ impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
) {
if dir == Direction::Decrypt {
//Handle special CCM partial block process.
- let mut intdata_o: [u32; 4] = [0; 4];
- for i in 0..intdata_o.len() {
- intdata_o[i] = p.dout().read();
- }
let mut temp2 = [0; 4];
- temp2[0] = p.csgcmccmr(0).read();
- temp2[1] = p.csgcmccmr(1).read();
- temp2[2] = p.csgcmccmr(2).read();
- temp2[3] = p.csgcmccmr(3).read();
- p.cr().write(|w| w.set_algomode3(true));
- p.cr().write(|w| w.set_algomode0(1));
+ temp2[0] = p.csgcmccmr(0).read().swap_bytes();
+ temp2[1] = p.csgcmccmr(1).read().swap_bytes();
+ temp2[2] = p.csgcmccmr(2).read().swap_bytes();
+ temp2[3] = p.csgcmccmr(3).read().swap_bytes();
+ p.cr().modify(|w| w.set_algomode3(true));
+ p.cr().modify(|w| w.set_algomode0(1));
p.cr().modify(|w| w.set_gcm_ccmph(3));
// Header phase
p.cr().modify(|w| w.set_gcm_ccmph(1));
+ for i in 0..AES_BLOCK_SIZE {
+ int_data[i] = int_data[i] & padding_mask[i];
+ }
let mut in_data: [u32; 4] = [0; 4];
for i in 0..in_data.len() {
- let mut mask_bytes: [u8; 4] = [0; 4];
- mask_bytes.copy_from_slice(&padding_mask[(i * 4)..(i * 4) + 4]);
- let mask_word = u32::from_le_bytes(mask_bytes);
- in_data[i] = intdata_o[i] & mask_word;
+ let mut int_bytes: [u8; 4] = [0; 4];
+ int_bytes.copy_from_slice(&int_data[(i * 4)..(i * 4) + 4]);
+ let int_word = u32::from_le_bytes(int_bytes);
+ in_data[i] = int_word;
in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i];
+ p.din().write_value(in_data[i]);
}
}
}
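The new `swap_bytes()` calls are most plausibly an endianness fix: with DATATYPE set to 8-bit, the core byte-swaps every 32-bit word crossing the DIN/DOUT data path, but the CSGCMCCM context registers are read back in raw word order, so their bytes must be reversed in software before being XORed with values that traveled the swapped path. The relationship, as a self-contained check:

    let w: u32 = 0x0011_2233;
    // Byte-swapping a word is the same as reinterpreting its bytes
    // in the opposite endianness.
    assert_eq!(w.swap_bytes(), 0x3322_1100);
    assert_eq!(u32::from_le_bytes(w.to_be_bytes()), w.swap_bytes());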
From 14c2c28e068d6e506c372611800e6dded8d8f440 Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Tue, 20 Feb 2024 18:05:35 -0500
Subject: [PATCH 10/23] Corrected additional associated data operation.
---
embassy-stm32/src/cryp/mod.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index 634c85883..d53252a6a 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -703,7 +703,7 @@ impl<'d, T: Instance> Cryp<'d, T> {
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
- in_word.copy_from_slice(&aad[index..index + 4]);
+ in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
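The one-line fix makes the full-block path drain `ctx.aad_buffer` (which holds the CCM B1 header bytes followed by buffered caller AAD) instead of rereading the caller's slice. The underlying pattern is a fill-then-flush block buffer; a minimal sketch of the idea, independent of the driver:

    /// Accumulates bytes into a fixed 16-byte block and reports full blocks.
    struct BlockBuffer {
        buf: [u8; 16],
        len: usize,
    }

    impl BlockBuffer {
        /// Pushes as many bytes as fit; returns a completed block if one filled.
        /// The caller re-pushes `&input[taken..]` until the input is consumed.
        fn push(&mut self, input: &[u8]) -> (usize, Option<[u8; 16]>) {
            let taken = core::cmp::min(input.len(), 16 - self.len);
            self.buf[self.len..self.len + taken].copy_from_slice(&input[..taken]);
            self.len += taken;
            if self.len == 16 {
                self.len = 0;
                (taken, Some(self.buf))
            } else {
                (taken, None)
            }
        }
    }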
From 29d8b459568b53f1e281d0914b5c897206c9bd4b Mon Sep 17 00:00:00 2001
From: Caleb Garrett <47389035+caleb-garrett@users.noreply.github.com>
Date: Wed, 21 Feb 2024 12:07:53 -0500
Subject: [PATCH 11/23] Add DES and TDES support. Support variable tag sizes.
---
embassy-stm32/src/cryp/mod.rs | 237 +++++++++++++++++++++++++++++-----
1 file changed, 203 insertions(+), 34 deletions(-)
diff --git a/embassy-stm32/src/cryp/mod.rs b/embassy-stm32/src/cryp/mod.rs
index d53252a6a..a4f1e42dc 100644
--- a/embassy-stm32/src/cryp/mod.rs
+++ b/embassy-stm32/src/cryp/mod.rs
@@ -60,8 +60,152 @@ pub trait Cipher<'c> {
/// This trait enables restriction of ciphers to specific key sizes.
pub trait CipherSized {}
+/// This trait enables restriction of initialization vectors to sizes compatible with a cipher mode.
+pub trait IVSized {}
+
/// This trait enables restriction of a header phase to authenticated ciphers only.
-pub trait CipherAuthenticated {}
+pub trait CipherAuthenticated<const TAG_SIZE: usize> {}

    pub fn new(peri: impl Peripheral<P = T> + 'd) -> Self {
- CRYP::enable_and_reset();
+ T::enable_and_reset();
into_ref!(peri);
let instance = Self { _peripheral: peri };
instance
diff --git a/examples/stm32f7/src/bin/cryp.rs b/examples/stm32f7/src/bin/cryp.rs
index be41955c5..04927841a 100644
--- a/examples/stm32f7/src/bin/cryp.rs
+++ b/examples/stm32f7/src/bin/cryp.rs
@@ -1,10 +1,9 @@
#![no_std]
#![no_main]
-use aes_gcm::{
- aead::{heapless::Vec, AeadInPlace, KeyInit},
- Aes128Gcm,
-};
+use aes_gcm::aead::heapless::Vec;
+use aes_gcm::aead::{AeadInPlace, KeyInit};
+use aes_gcm::Aes128Gcm;
use defmt::info;
use embassy_executor::Spawner;
use embassy_stm32::cryp::*;
@@ -55,9 +54,12 @@ async fn main(_spawner: Spawner) -> ! {
let mut payload_vec: Vec
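The example's tail is cut off above, but the imports indicate the flow: encrypt with the hardware CRYP driver, then reproduce the result with the software `aes-gcm` crate for comparison. A plausible completion under that assumption (names and sizes are illustrative):

    // Software reference using the RustCrypto aes-gcm crate.
    let key: [u8; 16] = [0; 16];
    let iv: [u8; 12] = [0; 12];
    let payload = b"hello world";
    let aad = b"additional data";

    let cipher = Aes128Gcm::new(&key.into());
    let mut payload_vec: Vec<u8, 32> = Vec::from_slice(payload).unwrap();
    let sw_tag = cipher
        .encrypt_in_place_detached(&iv.into(), aad, &mut payload_vec)
        .unwrap();
    // payload_vec and sw_tag can then be asserted equal to the CRYP output.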