//! Crypto Accelerator (CRYP)
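//!
//! A minimal blocking usage sketch, assuming an `embassy_stm32::init`-style entry point and
//! AES-GCM encryption; the names `p`, `key`, `iv`, `aad`, `plaintext` and the buffer sizes are
//! illustrative placeholders, not a definitive example:
//! ```ignore
//! let p = embassy_stm32::init(Default::default());
//! let cryp = Cryp::new(p.CRYP);
//! let key: [u8; 16] = [0; 16]; // 128-bit key
//! let iv: [u8; 12] = [0; 12]; // 96-bit GCM nonce
//! let aad: [u8; 16] = [0; 16];
//! let plaintext: [u8; 32] = [0; 32];
//! let mut ciphertext: [u8; 32] = [0; 32];
//! let mut tag: [u8; 16] = [0; 16];
//! let cipher = AesGcm::new(&key, &iv);
//! let mut ctx = cryp.start(&cipher, Direction::Encrypt);
//! cryp.aad_blocking(&mut ctx, &aad, true);
//! cryp.payload_blocking(&mut ctx, &plaintext, &mut ciphertext, true);
//! cryp.finish_blocking(ctx, &mut tag);
//! ```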
use core::cmp::min;
use core::marker::PhantomData;
use embassy_hal_internal::{into_ref, PeripheralRef};
use crate::pac;
use crate::peripherals::CRYP;
use crate::rcc::sealed::RccPeripheral;
use crate::{interrupt, peripherals, Peripheral};
const DES_BLOCK_SIZE: usize = 8; // 64 bits
const AES_BLOCK_SIZE: usize = 16; // 128 bits
/// This trait encapsulates all cipher-specific behavior.
pub trait Cipher<'c> {
/// Processing block size. Determined by the processor and the algorithm.
const BLOCK_SIZE: usize;
/// Indicates whether the cipher requires the application to provide padding.
/// If `true`, no partial blocks will be accepted (a panic will occur).
const REQUIRES_PADDING: bool = false;
/// Returns the symmetric key.
fn key(&self) -> &[u8];
/// Returns the initialization vector.
fn iv(&self) -> &[u8];
/// Sets the processor algorithm mode according to the associated cipher.
fn set_algomode(&self, p: &pac::cryp::Cryp);
/// Performs any key preparation within the processor, if necessary.
fn prepare_key(&self, _p: &pac::cryp::Cryp) {}
/// Performs any cipher-specific initialization.
fn init_phase(&self, _p: &pac::cryp::Cryp) {}
/// Called prior to processing the last data block for cipher-specific operations.
fn pre_final_block(&self, _p: &pac::cryp::Cryp, _dir: Direction) -> [u32; 4] {
return [0; 4];
}
/// Called after processing the last data block for cipher-specific operations.
fn post_final_block(
&self,
_p: &pac::cryp::Cryp,
_dir: Direction,
_int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
_padding_mask: [u8; 16],
) {
}
/// Called prior to processing the first associated data block for cipher-specific operations.
fn get_header_block(&self) -> &[u8] {
return [0; 0].as_slice();
}
}
/// This trait enables restriction of ciphers to specific key sizes.
pub trait CipherSized {}
/// This trait enables restriction of a header phase to authenticated ciphers only.
pub trait CipherAuthenticated {}
/// AES-ECB Cipher Mode
pub struct AesEcb<'c, const KEY_SIZE: usize> {
iv: &'c [u8; 0],
key: &'c [u8; KEY_SIZE],
}
impl<'c, const KEY_SIZE: usize> AesEcb<'c, KEY_SIZE> {
/// Constructs a new AES-ECB cipher for a cryptographic operation.
pub fn new(key: &'c [u8; KEY_SIZE]) -> Self {
return Self { key: key, iv: &[0; 0] };
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesEcb<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
const REQUIRES_PADDING: bool = true;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &'c [u8] {
self.iv
}
fn prepare_key(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(7));
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_crypen(true));
while p.sr().read().busy() {}
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(4));
p.cr().modify(|w| w.set_algomode3(false));
}
}
impl<'c> CipherSized for AesEcb<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesEcb<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesEcb<'c, { 256 / 8 }> {}
/// AES-CBC Cipher Mode
pub struct AesCbc<'c, const KEY_SIZE: usize> {
iv: &'c [u8; 16],
key: &'c [u8; KEY_SIZE],
}
impl<'c, const KEY_SIZE: usize> AesCbc<'c, KEY_SIZE> {
/// Constructs a new AES-CBC cipher for a cryptographic operation.
pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self {
return Self { key: key, iv: iv };
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCbc<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
const REQUIRES_PADDING: bool = true;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &'c [u8] {
self.iv
}
fn prepare_key(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(7));
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_crypen(true));
while p.sr().read().busy() {}
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(5));
p.cr().modify(|w| w.set_algomode3(false));
}
}
impl<'c> CipherSized for AesCbc<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesCbc<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesCbc<'c, { 256 / 8 }> {}
/// AES-CTR Cipher Mode
pub struct AesCtr<'c, const KEY_SIZE: usize> {
iv: &'c [u8; 16],
key: &'c [u8; KEY_SIZE],
}
impl<'c, const KEY_SIZE: usize> AesCtr<'c, KEY_SIZE> {
/// Constructs a new AES-CTR cipher for a cryptographic operation.
pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 16]) -> Self {
return Self { key: key, iv: iv };
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCtr<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &'c [u8] {
self.iv
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(6));
p.cr().modify(|w| w.set_algomode3(false));
}
}
impl<'c> CipherSized for AesCtr<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesCtr<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesCtr<'c, { 256 / 8 }> {}
/// AES-GCM Cipher Mode
pub struct AesGcm<'c, const KEY_SIZE: usize> {
iv: [u8; 16],
key: &'c [u8; KEY_SIZE],
}
impl<'c, const KEY_SIZE: usize> AesGcm<'c, KEY_SIZE> {
    /// Constructs a new AES-GCM cipher for a cryptographic operation.
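    ///
    /// A hedged sketch with placeholder key and nonce values (the nonce must be 96 bits):
    /// ```ignore
    /// let key: [u8; 32] = [0; 32];
    /// let nonce: [u8; 12] = [0; 12];
    /// let cipher = AesGcm::new(&key, &nonce);
    /// ```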
pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self {
let mut new_gcm = Self { key: key, iv: [0; 16] };
new_gcm.iv[..12].copy_from_slice(iv);
new_gcm.iv[15] = 2;
new_gcm
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGcm<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &[u8] {
self.iv.as_slice()
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(0));
p.cr().modify(|w| w.set_algomode3(true));
}
fn init_phase(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_gcm_ccmph(0));
p.cr().modify(|w| w.set_crypen(true));
while p.cr().read().crypen() {}
}
fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
//Handle special GCM partial block process.
if dir == Direction::Encrypt {
p.cr().modify(|w| w.set_crypen(false));
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_algomode0(6));
let iv1r = p.csgcmccmr(7).read() - 1;
p.init(1).ivrr().write_value(iv1r);
p.cr().modify(|w| w.set_crypen(true));
}
[0; 4]
}
fn post_final_block(
&self,
p: &pac::cryp::Cryp,
dir: Direction,
int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
padding_mask: [u8; AES_BLOCK_SIZE],
) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
p.cr().modify(|w| w.set_algomode3(true));
p.cr().modify(|w| w.set_algomode0(0));
for i in 0..AES_BLOCK_SIZE {
int_data[i] = int_data[i] & padding_mask[i];
}
p.cr().modify(|w| w.set_crypen(true));
p.cr().modify(|w| w.set_gcm_ccmph(3));
let mut index = 0;
let end_index = Self::BLOCK_SIZE;
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&int_data[index..index + 4]);
p.din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
for _ in 0..4 {
p.dout().read();
}
}
}
}
impl<'c> CipherSized for AesGcm<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesGcm<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesGcm<'c, { 256 / 8 }> {}
impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesGcm<'c, KEY_SIZE> {}
/// AES-GMAC Cipher Mode
pub struct AesGmac<'c, const KEY_SIZE: usize> {
iv: [u8; 16],
key: &'c [u8; KEY_SIZE],
}
impl<'c, const KEY_SIZE: usize> AesGmac<'c, KEY_SIZE> {
/// Constructs a new AES-GMAC cipher for a cryptographic operation.
pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8; 12]) -> Self {
let mut new_gmac = Self { key: key, iv: [0; 16] };
new_gmac.iv[..12].copy_from_slice(iv);
new_gmac.iv[15] = 2;
new_gmac
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesGmac<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &[u8] {
self.iv.as_slice()
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(0));
p.cr().modify(|w| w.set_algomode3(true));
}
fn init_phase(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_gcm_ccmph(0));
p.cr().modify(|w| w.set_crypen(true));
while p.cr().read().crypen() {}
}
fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
//Handle special GCM partial block process.
if dir == Direction::Encrypt {
p.cr().modify(|w| w.set_crypen(false));
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_algomode0(6));
let iv1r = p.csgcmccmr(7).read() - 1;
p.init(1).ivrr().write_value(iv1r);
p.cr().modify(|w| w.set_crypen(true));
}
[0; 4]
}
fn post_final_block(
&self,
p: &pac::cryp::Cryp,
dir: Direction,
int_data: &mut [u8; AES_BLOCK_SIZE],
_temp1: [u32; 4],
padding_mask: [u8; AES_BLOCK_SIZE],
) {
if dir == Direction::Encrypt {
//Handle special GCM partial block process.
p.cr().modify(|w| w.set_crypen(false));
p.cr().modify(|w| w.set_algomode3(true));
p.cr().modify(|w| w.set_algomode0(0));
for i in 0..AES_BLOCK_SIZE {
int_data[i] = int_data[i] & padding_mask[i];
}
p.cr().modify(|w| w.set_crypen(true));
p.cr().modify(|w| w.set_gcm_ccmph(3));
let mut index = 0;
let end_index = Self::BLOCK_SIZE;
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&int_data[index..index + 4]);
p.din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
for _ in 0..4 {
p.dout().read();
}
}
}
}
impl<'c> CipherSized for AesGmac<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesGmac<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesGmac<'c, { 256 / 8 }> {}
impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesGmac<'c, KEY_SIZE> {}
/// AES-CCM Cipher Mode
pub struct AesCcm<'c, const KEY_SIZE: usize> {
key: &'c [u8; KEY_SIZE],
aad_header: [u8; 6],
aad_header_len: usize,
block0: [u8; 16],
ctr: [u8; 16],
}
impl<'c, const KEY_SIZE: usize> AesCcm<'c, KEY_SIZE> {
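    /// Constructs a new AES-CCM cipher for a cryptographic operation.
    /// Panics if the IV is not 7-13 bytes long or if the tag length is not an even value of 4-16 bytes.
    ///
    /// A hedged sketch with placeholder values: a 13-byte nonce, 16 bytes of AAD, a 24-byte
    /// payload, and an 8-byte tag:
    /// ```ignore
    /// let key: [u8; 16] = [0; 16];
    /// let nonce: [u8; 13] = [0; 13];
    /// let cipher = AesCcm::new(&key, &nonce, 16, 24, 8);
    /// ```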
pub fn new(key: &'c [u8; KEY_SIZE], iv: &'c [u8], aad_len: usize, payload_len: usize, tag_len: u8) -> Self {
        if iv.len() > 13 || iv.len() < 7 {
panic!("CCM IV length must be 7-13 bytes.");
}
if (tag_len < 4) || (tag_len > 16) {
panic!("Tag length must be between 4 and 16 bytes.");
}
if tag_len % 2 > 0 {
panic!("Tag length must be a multiple of 2 bytes.");
}
let mut aad_header: [u8; 6] = [0; 6];
let mut aad_header_len = 0;
let mut block0: [u8; 16] = [0; 16];
if aad_len != 0 {
if aad_len < 65280 {
aad_header[0] = (aad_len >> 8) as u8 & 0xFF;
aad_header[1] = aad_len as u8 & 0xFF;
aad_header_len = 2;
} else {
aad_header[0] = 0xFF;
aad_header[1] = 0xFE;
let aad_len_bytes: [u8; 4] = aad_len.to_be_bytes();
aad_header[2] = aad_len_bytes[0];
aad_header[3] = aad_len_bytes[1];
aad_header[4] = aad_len_bytes[2];
aad_header[5] = aad_len_bytes[3];
aad_header_len = 6;
}
}
let total_aad_len = aad_header_len + aad_len;
let mut aad_padding_len = 16 - (total_aad_len % 16);
if aad_padding_len == 16 {
aad_padding_len = 0;
}
aad_header_len += aad_padding_len;
let total_aad_len_padded = aad_header_len + aad_len;
if total_aad_len_padded > 0 {
block0[0] = 0x40;
}
block0[0] |= (((tag_len - 2) >> 1) & 0x07) << 3;
block0[0] |= ((15 - (iv.len() as u8)) - 1) & 0x07;
block0[1..1 + iv.len()].copy_from_slice(iv);
let payload_len_bytes: [u8; 4] = payload_len.to_be_bytes();
if iv.len() <= 11 {
block0[12] = payload_len_bytes[0];
} else if payload_len_bytes[0] > 0 {
panic!("Message is too large for given IV size.");
}
if iv.len() <= 12 {
block0[13] = payload_len_bytes[1];
} else if payload_len_bytes[1] > 0 {
panic!("Message is too large for given IV size.");
}
block0[14] = payload_len_bytes[2];
block0[15] = payload_len_bytes[3];
let mut ctr: [u8; 16] = [0; 16];
ctr[0] = block0[0] & 0x07;
ctr[1..1 + iv.len()].copy_from_slice(&block0[1..1 + iv.len()]);
ctr[15] = 0x01;
return Self {
key: key,
aad_header: aad_header,
aad_header_len: aad_header_len,
block0: block0,
ctr: ctr,
};
}
}
impl<'c, const KEY_SIZE: usize> Cipher<'c> for AesCcm<'c, KEY_SIZE> {
const BLOCK_SIZE: usize = AES_BLOCK_SIZE;
fn key(&self) -> &'c [u8] {
self.key
}
fn iv(&self) -> &[u8] {
self.ctr.as_slice()
}
fn set_algomode(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_algomode0(1));
p.cr().modify(|w| w.set_algomode3(true));
}
fn init_phase(&self, p: &pac::cryp::Cryp) {
p.cr().modify(|w| w.set_gcm_ccmph(0));
let mut index = 0;
let end_index = index + Self::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&self.block0[index..index + 4]);
p.din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
p.cr().modify(|w| w.set_crypen(true));
while p.cr().read().crypen() {}
}
fn get_header_block(&self) -> &[u8] {
return &self.aad_header[0..self.aad_header_len];
}
fn pre_final_block(&self, p: &pac::cryp::Cryp, dir: Direction) -> [u32; 4] {
//Handle special CCM partial block process.
let mut temp1 = [0; 4];
if dir == Direction::Decrypt {
p.cr().modify(|w| w.set_crypen(false));
let iv1temp = p.init(1).ivrr().read();
temp1[0] = p.csgcmccmr(0).read().swap_bytes();
temp1[1] = p.csgcmccmr(1).read().swap_bytes();
temp1[2] = p.csgcmccmr(2).read().swap_bytes();
temp1[3] = p.csgcmccmr(3).read().swap_bytes();
p.init(1).ivrr().write_value(iv1temp);
p.cr().modify(|w| w.set_algomode3(false));
p.cr().modify(|w| w.set_algomode0(6));
p.cr().modify(|w| w.set_crypen(true));
}
return temp1;
}
fn post_final_block(
&self,
p: &pac::cryp::Cryp,
dir: Direction,
int_data: &mut [u8; AES_BLOCK_SIZE],
temp1: [u32; 4],
padding_mask: [u8; 16],
) {
if dir == Direction::Decrypt {
//Handle special CCM partial block process.
let mut temp2 = [0; 4];
temp2[0] = p.csgcmccmr(0).read().swap_bytes();
temp2[1] = p.csgcmccmr(1).read().swap_bytes();
temp2[2] = p.csgcmccmr(2).read().swap_bytes();
temp2[3] = p.csgcmccmr(3).read().swap_bytes();
p.cr().modify(|w| w.set_algomode3(true));
p.cr().modify(|w| w.set_algomode0(1));
p.cr().modify(|w| w.set_gcm_ccmph(3));
// Header phase
p.cr().modify(|w| w.set_gcm_ccmph(1));
for i in 0..AES_BLOCK_SIZE {
int_data[i] = int_data[i] & padding_mask[i];
}
let mut in_data: [u32; 4] = [0; 4];
for i in 0..in_data.len() {
let mut int_bytes: [u8; 4] = [0; 4];
int_bytes.copy_from_slice(&int_data[(i * 4)..(i * 4) + 4]);
let int_word = u32::from_le_bytes(int_bytes);
in_data[i] = int_word;
in_data[i] = in_data[i] ^ temp1[i] ^ temp2[i];
p.din().write_value(in_data[i]);
}
}
}
}
impl<'c> CipherSized for AesCcm<'c, { 128 / 8 }> {}
impl<'c> CipherSized for AesCcm<'c, { 192 / 8 }> {}
impl<'c> CipherSized for AesCcm<'c, { 256 / 8 }> {}
impl<'c, const KEY_SIZE: usize> CipherAuthenticated for AesCcm<'c, KEY_SIZE> {}
/// Holds the state information for a cipher operation.
/// Allows suspending/resuming of cipher operations.
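///
/// A hedged sketch of interleaving two operations; each blocking call reloads and then saves
/// the peripheral state held in its context, so the names and lengths below are illustrative:
/// ```ignore
/// let mut ctx_a = cryp.start(&cipher_a, Direction::Encrypt);
/// let mut ctx_b = cryp.start(&cipher_b, Direction::Decrypt);
/// cryp.payload_blocking(&mut ctx_a, &pt_a[..16], &mut ct_a[..16], false);
/// cryp.payload_blocking(&mut ctx_b, &ct_b[..16], &mut pt_b[..16], false);
/// ```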
pub struct Context<'c, C: Cipher<'c> + CipherSized> {
phantom_data: PhantomData<&'c C>,
cipher: &'c C,
dir: Direction,
last_block_processed: bool,
header_processed: bool,
aad_complete: bool,
cr: u32,
iv: [u32; 4],
csgcmccm: [u32; 8],
csgcm: [u32; 8],
header_len: u64,
payload_len: u64,
aad_buffer: [u8; 16],
aad_buffer_len: usize,
}
/// Selects whether the crypto processor operates in encryption or decryption mode.
#[derive(PartialEq, Clone, Copy)]
pub enum Direction {
/// Encryption mode
Encrypt,
/// Decryption mode
Decrypt,
}
/// Crypto Accelerator Driver
pub struct Cryp<'d, T: Instance> {
_peripheral: PeripheralRef<'d, T>,
}
impl<'d, T: Instance> Cryp<'d, T> {
/// Create a new CRYP driver.
pub fn new(peri: impl Peripheral<P = T> + 'd) -> Self {
CRYP::enable_and_reset();
into_ref!(peri);
let instance = Self { _peripheral: peri };
instance
}
/// Start a new cipher operation.
/// Key size must be 128, 192, or 256 bits.
    /// An initialization vector must be supplied only when the selected cipher mode requires one.
/// Panics if there is any mismatch in parameters, such as an incorrect IV length or invalid mode.
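    ///
    /// A hedged sketch, assuming a `cryp` driver created with [`Cryp::new`]; key and IV values
    /// are placeholders:
    /// ```ignore
    /// let key: [u8; 32] = [0; 32];
    /// let iv: [u8; 16] = [0; 16];
    /// let cipher = AesCtr::new(&key, &iv);
    /// let mut ctx = cryp.start(&cipher, Direction::Encrypt);
    /// ```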
pub fn start<'c, C: Cipher<'c> + CipherSized>(&self, cipher: &'c C, dir: Direction) -> Context<'c, C> {
let mut ctx: Context<'c, C> = Context {
dir,
last_block_processed: false,
cr: 0,
iv: [0; 4],
csgcmccm: [0; 8],
csgcm: [0; 8],
aad_complete: false,
header_len: 0,
payload_len: 0,
cipher: cipher,
phantom_data: PhantomData,
header_processed: false,
aad_buffer: [0; 16],
aad_buffer_len: 0,
};
T::regs().cr().modify(|w| w.set_crypen(false));
let key = ctx.cipher.key();
if key.len() == (128 / 8) {
T::regs().cr().modify(|w| w.set_keysize(0));
} else if key.len() == (192 / 8) {
T::regs().cr().modify(|w| w.set_keysize(1));
} else if key.len() == (256 / 8) {
T::regs().cr().modify(|w| w.set_keysize(2));
}
self.load_key(key);
// Set data type to 8-bit. This will match software implementations.
T::regs().cr().modify(|w| w.set_datatype(2));
ctx.cipher.prepare_key(&T::regs());
ctx.cipher.set_algomode(&T::regs());
// Set encrypt/decrypt
if dir == Direction::Encrypt {
T::regs().cr().modify(|w| w.set_algodir(false));
} else {
T::regs().cr().modify(|w| w.set_algodir(true));
}
// Load the IV into the registers.
let iv = ctx.cipher.iv();
let mut full_iv: [u8; 16] = [0; 16];
full_iv[0..iv.len()].copy_from_slice(iv);
let mut iv_idx = 0;
let mut iv_word: [u8; 4] = [0; 4];
iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
iv_idx += 4;
T::regs().init(0).ivlr().write_value(u32::from_be_bytes(iv_word));
iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
iv_idx += 4;
T::regs().init(0).ivrr().write_value(u32::from_be_bytes(iv_word));
iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
iv_idx += 4;
T::regs().init(1).ivlr().write_value(u32::from_be_bytes(iv_word));
iv_word.copy_from_slice(&full_iv[iv_idx..iv_idx + 4]);
T::regs().init(1).ivrr().write_value(u32::from_be_bytes(iv_word));
// Flush in/out FIFOs
        T::regs().cr().modify(|w| w.set_fflush(true));
ctx.cipher.init_phase(&T::regs());
self.store_context(&mut ctx);
ctx
}
/// Controls the header phase of cipher processing.
/// This function is only valid for GCM, CCM, and GMAC modes.
/// It only needs to be called if using one of these modes and there is associated data.
/// All AAD must be supplied to this function prior to starting the payload phase with `payload_blocking`.
/// The AAD must be supplied in multiples of the block size (128 bits), except when supplying the last block.
/// When supplying the last block of AAD, `last_aad_block` must be `true`.
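    ///
    /// A hedged sketch, assuming `ctx` came from [`Cryp::start`] with an authenticated cipher
    /// and `aad` holds 32 bytes of associated data:
    /// ```ignore
    /// cryp.aad_blocking(&mut ctx, &aad[..16], false);
    /// cryp.aad_blocking(&mut ctx, &aad[16..32], true);
    /// ```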
pub fn aad_blocking<'c, C: Cipher<'c> + CipherSized + CipherAuthenticated>(
&self,
ctx: &mut Context<'c, C>,
aad: &[u8],
last_aad_block: bool,
) {
self.load_context(ctx);
// Perform checks for correctness.
if ctx.aad_complete {
panic!("Cannot update AAD after starting payload!")
}
ctx.header_len += aad.len() as u64;
// Header phase
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(1));
T::regs().cr().modify(|w| w.set_crypen(true));
// First write the header B1 block if not yet written.
if !ctx.header_processed {
ctx.header_processed = true;
let header = ctx.cipher.get_header_block();
ctx.aad_buffer[0..header.len()].copy_from_slice(header);
ctx.aad_buffer_len += header.len();
}
// Fill the header block to make a full block.
let len_to_copy = min(aad.len(), C::BLOCK_SIZE - ctx.aad_buffer_len);
ctx.aad_buffer[ctx.aad_buffer_len..ctx.aad_buffer_len + len_to_copy].copy_from_slice(&aad[..len_to_copy]);
ctx.aad_buffer_len += len_to_copy;
ctx.aad_buffer[ctx.aad_buffer_len..].fill(0);
let mut aad_len_remaining = aad.len() - len_to_copy;
if ctx.aad_buffer_len < C::BLOCK_SIZE {
// The buffer isn't full and this is the last buffer, so process it as is (already padded).
if last_aad_block {
let mut index = 0;
let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
// Block until input FIFO is empty.
while !T::regs().sr().read().ifem() {}
// Switch to payload phase.
ctx.aad_complete = true;
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(2));
                T::regs().cr().modify(|w| w.set_fflush(true));
} else {
// Just return because we don't yet have a full block to process.
return;
}
} else {
// Load the full block from the buffer.
let mut index = 0;
let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
                in_word.copy_from_slice(&ctx.aad_buffer[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
// Block until input FIFO is empty.
while !T::regs().sr().read().ifem() {}
}
// Handle a partial block that is passed in.
ctx.aad_buffer_len = 0;
let leftovers = aad_len_remaining % C::BLOCK_SIZE;
ctx.aad_buffer[..leftovers].copy_from_slice(&aad[aad.len() - leftovers..aad.len()]);
aad_len_remaining -= leftovers;
assert_eq!(aad_len_remaining % C::BLOCK_SIZE, 0);
// Load full data blocks into core.
let num_full_blocks = aad_len_remaining / C::BLOCK_SIZE;
        for block in 0..num_full_blocks {
            let mut index = len_to_copy + (block * C::BLOCK_SIZE);
            let end_index = index + C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&aad[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
// Block until input FIFO is empty.
while !T::regs().sr().read().ifem() {}
}
if last_aad_block {
// Switch to payload phase.
ctx.aad_complete = true;
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(2));
            T::regs().cr().modify(|w| w.set_fflush(true));
}
self.store_context(ctx);
}
/// Performs encryption/decryption on the provided context.
/// The context determines algorithm, mode, and state of the crypto accelerator.
/// When the last piece of data is supplied, `last_block` should be `true`.
/// This function panics under various mismatches of parameters.
/// Input and output buffer lengths must match.
    /// Data must be a multiple of the block size (128 bits for AES, 64 bits for DES) for CBC and ECB modes.
/// Padding or ciphertext stealing must be managed by the application for these modes.
/// Data must also be a multiple of block size unless `last_block` is `true`.
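    ///
    /// A hedged sketch for a 20-byte message in a mode without a padding requirement (e.g. CTR
    /// or GCM); the trailing 4-byte chunk is only accepted because `last_block` is `true`:
    /// ```ignore
    /// cryp.payload_blocking(&mut ctx, &input[..16], &mut output[..16], false);
    /// cryp.payload_blocking(&mut ctx, &input[16..20], &mut output[16..20], true);
    /// ```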
pub fn payload_blocking<'c, C: Cipher<'c> + CipherSized>(
&self,
ctx: &mut Context<'c, C>,
input: &[u8],
output: &mut [u8],
last_block: bool,
) {
self.load_context(ctx);
let last_block_remainder = input.len() % C::BLOCK_SIZE;
// Perform checks for correctness.
if !ctx.aad_complete && ctx.header_len > 0 {
panic!("Additional associated data must be processed first!");
} else if !ctx.aad_complete {
ctx.aad_complete = true;
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(2));
            T::regs().cr().modify(|w| w.set_fflush(true));
T::regs().cr().modify(|w| w.set_crypen(true));
}
if ctx.last_block_processed {
panic!("The last block has already been processed!");
}
if input.len() != output.len() {
panic!("Output buffer length must match input length.");
}
if !last_block {
if last_block_remainder != 0 {
panic!("Input length must be a multiple of {} bytes.", C::BLOCK_SIZE);
}
}
if C::REQUIRES_PADDING {
if last_block_remainder != 0 {
panic!("Input must be a multiple of {} bytes in ECB and CBC modes. Consider padding or ciphertext stealing.", C::BLOCK_SIZE);
}
}
if last_block {
ctx.last_block_processed = true;
}
// Load data into core, block by block.
let num_full_blocks = input.len() / C::BLOCK_SIZE;
for block in 0..num_full_blocks {
let mut index = block * C::BLOCK_SIZE;
let end_index = index + C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&input[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
let mut index = block * C::BLOCK_SIZE;
let end_index = index + C::BLOCK_SIZE;
// Block until there is output to read.
while !T::regs().sr().read().ofne() {}
// Read block out
while index < end_index {
let out_word: u32 = T::regs().dout().read();
output[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice());
index += 4;
}
}
// Handle the final block, which is incomplete.
if last_block_remainder > 0 {
let temp1 = ctx.cipher.pre_final_block(&T::regs(), ctx.dir);
let mut intermediate_data: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
let mut last_block: [u8; AES_BLOCK_SIZE] = [0; AES_BLOCK_SIZE];
last_block[..last_block_remainder].copy_from_slice(&input[input.len() - last_block_remainder..input.len()]);
let mut index = 0;
let end_index = C::BLOCK_SIZE;
// Write block in
while index < end_index {
let mut in_word: [u8; 4] = [0; 4];
in_word.copy_from_slice(&last_block[index..index + 4]);
T::regs().din().write_value(u32::from_ne_bytes(in_word));
index += 4;
}
let mut index = 0;
let end_index = C::BLOCK_SIZE;
// Block until there is output to read.
while !T::regs().sr().read().ofne() {}
// Read block out
while index < end_index {
let out_word: u32 = T::regs().dout().read();
intermediate_data[index..index + 4].copy_from_slice(u32::to_ne_bytes(out_word).as_slice());
index += 4;
}
// Handle the last block depending on mode.
let output_len = output.len();
output[output_len - last_block_remainder..output_len]
.copy_from_slice(&intermediate_data[0..last_block_remainder]);
let mut mask: [u8; 16] = [0; 16];
mask[..last_block_remainder].fill(0xFF);
ctx.cipher
.post_final_block(&T::regs(), ctx.dir, &mut intermediate_data, temp1, mask);
}
ctx.payload_len += input.len() as u64;
self.store_context(ctx);
}
/// This function only needs to be called for GCM, CCM, and GMAC modes to
/// generate an authentication tag.
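    ///
    /// A hedged sketch; the context is consumed and the 128-bit tag is written to `tag`:
    /// ```ignore
    /// let mut tag: [u8; 16] = [0; 16];
    /// cryp.finish_blocking(ctx, &mut tag);
    /// ```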
pub fn finish_blocking<'c, C: Cipher<'c> + CipherSized + CipherAuthenticated>(
&self,
mut ctx: Context<'c, C>,
tag: &mut [u8; 16],
) {
self.load_context(&mut ctx);
T::regs().cr().modify(|w| w.set_crypen(false));
T::regs().cr().modify(|w| w.set_gcm_ccmph(3));
T::regs().cr().modify(|w| w.set_crypen(true));
let headerlen1: u32 = (ctx.header_len >> 32) as u32;
let headerlen2: u32 = ctx.header_len as u32;
let payloadlen1: u32 = (ctx.payload_len >> 32) as u32;
let payloadlen2: u32 = ctx.payload_len as u32;
T::regs().din().write_value(headerlen1.swap_bytes());
T::regs().din().write_value(headerlen2.swap_bytes());
T::regs().din().write_value(payloadlen1.swap_bytes());
T::regs().din().write_value(payloadlen2.swap_bytes());
while !T::regs().sr().read().ofne() {}
tag[0..4].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice());
tag[4..8].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice());
tag[8..12].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice());
tag[12..16].copy_from_slice(T::regs().dout().read().to_ne_bytes().as_slice());
T::regs().cr().modify(|w| w.set_crypen(false));
}
fn load_key(&self, key: &[u8]) {
// Load the key into the registers.
let mut keyidx = 0;
let mut keyword: [u8; 4] = [0; 4];
let keylen = key.len() * 8;
if keylen > 192 {
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(0).klr().write_value(u32::from_be_bytes(keyword));
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(0).krr().write_value(u32::from_be_bytes(keyword));
}
if keylen > 128 {
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(1).klr().write_value(u32::from_be_bytes(keyword));
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(1).krr().write_value(u32::from_be_bytes(keyword));
}
if keylen > 64 {
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(2).klr().write_value(u32::from_be_bytes(keyword));
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(2).krr().write_value(u32::from_be_bytes(keyword));
}
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
keyidx += 4;
T::regs().key(3).klr().write_value(u32::from_be_bytes(keyword));
keyword.copy_from_slice(&key[keyidx..keyidx + 4]);
T::regs().key(3).krr().write_value(u32::from_be_bytes(keyword));
}
fn store_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &mut Context<'c, C>) {
// Wait for data block processing to finish.
while !T::regs().sr().read().ifem() {}
while T::regs().sr().read().ofne() {}
while T::regs().sr().read().busy() {}
// Disable crypto processor.
T::regs().cr().modify(|w| w.set_crypen(false));
// Save the peripheral state.
ctx.cr = T::regs().cr().read().0;
ctx.iv[0] = T::regs().init(0).ivlr().read();
ctx.iv[1] = T::regs().init(0).ivrr().read();
ctx.iv[2] = T::regs().init(1).ivlr().read();
ctx.iv[3] = T::regs().init(1).ivrr().read();
for i in 0..8 {
ctx.csgcmccm[i] = T::regs().csgcmccmr(i).read();
ctx.csgcm[i] = T::regs().csgcmr(i).read();
}
}
fn load_context<'c, C: Cipher<'c> + CipherSized>(&self, ctx: &Context<'c, C>) {
// Reload state registers.
T::regs().cr().write(|w| w.0 = ctx.cr);
T::regs().init(0).ivlr().write_value(ctx.iv[0]);
T::regs().init(0).ivrr().write_value(ctx.iv[1]);
T::regs().init(1).ivlr().write_value(ctx.iv[2]);
T::regs().init(1).ivrr().write_value(ctx.iv[3]);
for i in 0..8 {
T::regs().csgcmccmr(i).write_value(ctx.csgcmccm[i]);
T::regs().csgcmr(i).write_value(ctx.csgcm[i]);
}
self.load_key(ctx.cipher.key());
// Prepare key if applicable.
ctx.cipher.prepare_key(&T::regs());
T::regs().cr().write(|w| w.0 = ctx.cr);
// Enable crypto processor.
T::regs().cr().modify(|w| w.set_crypen(true));
}
}
pub(crate) mod sealed {
use super::*;
pub trait Instance {
fn regs() -> pac::cryp::Cryp;
}
}
/// CRYP instance trait.
pub trait Instance: sealed::Instance + Peripheral<P = Self> + crate::rcc::RccPeripheral + 'static + Send {
/// Interrupt for this CRYP instance.
type Interrupt: interrupt::typelevel::Interrupt;
}
foreach_interrupt!(
($inst:ident, cryp, CRYP, GLOBAL, $irq:ident) => {
impl Instance for peripherals::$inst {
type Interrupt = crate::interrupt::typelevel::$irq;
}
impl sealed::Instance for peripherals::$inst {
fn regs() -> crate::pac::cryp::Cryp {
crate::pac::$inst
}
}
};
);