use core::ops::{Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Sub, SubAssign};
use avr_oxide::concurrency::{interrupt, Isolated};
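/// Wrapper type whose reads and writes always go through
/// `core::ptr::read_volatile`/`write_volatile`, so accesses to
/// memory-mapped registers are not elided or reordered by the compiler.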
#[repr(C)]
pub struct Volatile<T>(T);
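/// A plain (non-volatile) byte of bit flags, manipulated through the
/// `BitFieldAccess` trait.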
#[derive(Copy,Clone,Eq,PartialEq)]
#[repr(C)]
#[cfg_attr(not(target_arch="avr"), derive(Debug))]
pub struct BitField(u8);
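/// Increment/decrement modulo a compile-time `BOUND`, wrapping from
/// `BOUND-1` back to 0 and vice versa. The `*_isolated` variants are for
/// callers that already hold an `Isolated` token (interrupts disabled);
/// the plain variants establish the isolation themselves.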
pub trait BoundedIncDec {
fn binc_isolated<const BOUND: usize>(&mut self, isotoken: Isolated);
fn bdec_isolated<const BOUND: usize>(&mut self, isotoken: Isolated);
fn binc<const BOUND: usize>(&mut self){
interrupt::isolated(|isotoken| {
self.binc_isolated::<BOUND>(isotoken)
})
}
fn bdec<const BOUND: usize>(&mut self) {
interrupt::isolated(|isotoken|{
self.bdec_isolated::<BOUND>(isotoken)
})
}
}
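/// Bounded (modulo-`BOUND`) arithmetic; currently just wrapping subtraction.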
pub trait BoundedMaths<T,R> {
fn bsub<const BOUND: usize>(&self, rhs: T) -> R {
interrupt::isolated(|isotoken|{
self.bsub_isolated::<BOUND>(isotoken, rhs)
})
}
fn bsub_isolated<const BOUND: usize>(&self, isotoken: Isolated, rhs: T) -> R;
}
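/// A byte of bit flags backed by a `Volatile<u8>`. Read-modify-write
/// operations are wrapped in an interrupt-isolated section so they cannot
/// be torn by an interrupt handler touching the same register.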
#[derive(PartialEq)]
#[repr(C)]
pub struct VolatileBitField(Volatile<u8>);
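/// The index (0..=7) of a single bit within a byte.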
#[derive(Copy,Clone,Eq,PartialEq)]
pub struct BitIndex(u8);
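/// An inclusive range of bit positions within a byte. `lo` is the lowest
/// bit index and `hip1` is the highest bit index plus one (an exclusive
/// upper bound), which keeps the mask arithmetic simple.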
#[derive(Copy,Clone,Eq,PartialEq)]
pub struct BitRange {
lo: u8,
hip1: u8
}
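/// Anything that can be converted into byte masks: `positive_byte_mask`
/// has the selected bit(s) set, `negative_byte_mask` is its complement,
/// and `lo_bit_index` is the position of the least significant selected bit.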
pub trait BitMaskable {
fn negative_byte_mask(self) -> u8;
fn positive_byte_mask(self) -> u8;
fn lo_bit_index(self) -> u8;
}
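/// Read/modify/write operations on a byte of bit flags. Implementors only
/// need to provide `read_byte` and `write_byte`; everything else has a
/// default implementation built on those two.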
pub trait BitFieldAccess {
fn read_byte(&self) -> u8;
fn write_byte(&mut self, val: u8);
fn is_set(&self, bit: BitIndex) -> bool {
(self.read_byte() & bit.positive_byte_mask()) > 0
}
fn is_all_set<B: BitMaskable+Copy>(&self, bits: B) -> bool {
(self.read_byte() & bits.positive_byte_mask()) == bits.positive_byte_mask()
}
fn is_any_set<B: BitMaskable+Copy>(&self, bits: B) -> bool {
(self.read_byte() & bits.positive_byte_mask()) > 0
}
fn is_all_clr<B: BitMaskable+Copy>(&self, bits: B) -> bool {
!self.is_any_set(bits)
}
fn is_clr(&self, bit: BitIndex) -> bool {
!self.is_set(bit)
}
fn set_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B) {
self.set(bits)
}
fn set<B: BitMaskable+Copy>(&mut self, bits: B) {
self.write_byte(self.read_byte() | bits.positive_byte_mask());
}
fn set_all(&mut self) {
self.write_byte(0xff);
}
fn clr_all(&mut self) {
self.write_byte(0x00);
}
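/// Write exactly the given mask: set the selected bits and clear all others.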
fn exc_set<B: BitMaskable+Copy>(&mut self, bits: B) {
self.write_byte(bits.positive_byte_mask());
}
fn clr_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B) {
self.clr(bits)
}
fn clr<B: BitMaskable+Copy>(&mut self, bits: B) {
self.write_byte(self.read_byte() & bits.negative_byte_mask());
}
fn exc_clr<B: BitMaskable+Copy>(&mut self, bits: B) {
self.write_byte(bits.negative_byte_mask());
}
fn set_or_clr<B: BitMaskable+Copy>(&mut self, bits: B, set: bool) {
match set {
true => self.set(bits),
false => self.clr(bits)
}
}
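/// Write `value` into the field selected by `bits`: the value is shifted up
/// to the field's lowest bit, masked to the field's width, and the remaining
/// bits of the byte are left unchanged.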
fn set_to<B: BitMaskable+Copy>(&mut self, bits: B, value: u8) {
let masked = (value << bits.lo_bit_index()) & bits.positive_byte_mask();
self.write_byte((self.read_byte() & bits.negative_byte_mask()) | masked);
}
fn set_to_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B, value: u8) {
self.set_to(bits, value);
}
fn get_val<B: BitMaskable+Copy>(&self, bits: B) -> u8 {
(self.read_byte() & bits.positive_byte_mask()) >> bits.lo_bit_index()
}
}
impl BitIndex {
pub fn bit(b: usize) -> Self {
#[cfg(feature="runtime_checks")]
if b > 7 {
avr_oxide::oserror::halt(avr_oxide::oserror::OsError::BadParams);
}
Self(b as u8)
}
pub const fn bit_c(b: usize) -> Self {
assert!(b < 8);
Self(b as u8)
}
pub const fn binary(&self) -> u8 {
0b1u8 << self.0
}
}
impl BitMaskable for BitIndex {
fn negative_byte_mask(self) -> u8 {
! self.positive_byte_mask()
}
fn positive_byte_mask(self) -> u8 {
0x01u8 << self.lo_bit_index()
}
fn lo_bit_index(self) -> u8 {
self.0 as u8
}
}
impl BitRange {
pub fn range(lo: usize, hi: usize) -> Self {
#[cfg(feature="runtime_checks")]
if (lo > 7) || (hi > 7) || (lo > hi) {
avr_oxide::oserror::halt(avr_oxide::oserror::OsError::BadParams);
}
Self {
lo: lo as u8,
hip1: hi as u8 + 1
}
}
pub const fn range_c(lo: usize, hi: usize) -> Self {
assert!(lo < 8);
assert!(hi < 8);
assert!(lo <= hi);
Self {
lo: lo as u8,
hip1: hi as u8 + 1
}
}
}
impl BitMaskable for BitRange {
fn negative_byte_mask(self) -> u8 {
! self.positive_byte_mask()
}
fn positive_byte_mask(self) -> u8 {
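// Shift as i32 so that a shift by hip1 == 8 (a range ending at bit 7) does
// not overflow, then truncate; the XOR leaves ones exactly in [lo, hip1).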
(0xff << self.hip1) as u8 ^ (0xff << self.lo) as u8
}
fn lo_bit_index(self) -> u8 {
self.lo
}
}
impl BitField {
pub const fn with_initial(vals: u8) -> Self {
BitField(vals)
}
pub fn with_bits_set(bits: &[BitIndex]) -> Self {
let mut new = Self::all_clr();
for bit in bits {
new.set(*bit);
}
new
}
pub const fn all_clr() -> Self {
BitField(0u8)
}
pub const fn all_set() -> Self {
BitField(0xffu8)
}
}
impl BitFieldAccess for BitField {
fn read_byte(&self) -> u8 {
self.0
}
fn write_byte(&mut self, val: u8) {
self.0 = val
}
}
impl BitAnd<u8> for BitField {
type Output = u8;
fn bitand(self, rhs: u8) -> Self::Output {
self.0 & rhs
}
}
impl BitOr<u8> for BitField {
type Output = u8;
fn bitor(self, rhs: u8) -> Self::Output {
self.0 | rhs
}
}
impl BitFieldAccess for VolatileBitField {
fn read_byte(&self) -> u8 {
self.0.read()
}
fn write_byte(&mut self, val: u8) {
self.0.write(val);
}
fn set_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B) {
self.write_byte(self.read_byte() | bits.positive_byte_mask());
}
fn set<B: BitMaskable+Copy>(&mut self, bits: B) {
avr_oxide::concurrency::interrupt::isolated(|isotoken|{
self.set_isolated(isotoken, bits)
})
}
fn clr_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B) {
self.write_byte(self.read_byte() & bits.negative_byte_mask());
}
fn clr<B: BitMaskable+Copy>(&mut self, bits: B) {
avr_oxide::concurrency::interrupt::isolated(|isotoken|{
self.clr_isolated(isotoken, bits)
});
}
fn set_or_clr<B: BitMaskable+Copy>(&mut self, bits: B, set: bool) {
avr_oxide::concurrency::interrupt::isolated(|isotoken| {
match set {
true => self.set_isolated(isotoken, bits),
false => self.clr_isolated(isotoken, bits)
}
})
}
fn set_to<B: BitMaskable+Copy>(&mut self, bits: B, value: u8) {
avr_oxide::concurrency::interrupt::isolated(|isotoken| {
self.set_to_isolated(isotoken, bits, value)
})
}
fn set_to_isolated<B: BitMaskable+Copy>(&mut self, _isotoken: Isolated, bits: B, value: u8) {
let masked = (value << bits.lo_bit_index()) & bits.positive_byte_mask();
self.write_byte((self.read_byte() & bits.negative_byte_mask()) | masked);
}
}
impl VolatileBitField {
pub const fn all_clr() -> Self {
VolatileBitField(Volatile::<u8>::zero())
}
pub const fn all_set() -> Self {
VolatileBitField(Volatile::<u8>::effeff())
}
pub fn snapshot(&self) -> BitField {
BitField::with_initial(self.0.snapshot())
}
}
impl From<u8> for Volatile<u8> {
fn from(val: u8) -> Self {
Volatile(val)
}
}
impl Volatile<u8> {
pub const fn zero() -> Self {
Volatile(0)
}
pub const fn effeff() -> Self {
Volatile(0xff)
}
pub fn snapshot(&self) -> u8 {
unsafe {
core::ptr::read_volatile(&self.0 as *const u8)
}
}
pub fn read(&self) -> u8 {
self.snapshot()
}
pub fn write(&mut self, val: u8) {
unsafe {
core::ptr::write_volatile(&mut self.0 as *mut u8, val)
}
}
}
impl Add<u8> for Volatile<u8> {
type Output = u8;
fn add(self, rhs: u8) -> Self::Output {
self.read() + rhs
}
}
impl AddAssign<u8> for Volatile<u8> {
fn add_assign(&mut self, rhs: u8) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() + rhs);
});
}
}
impl Sub<u8> for Volatile<u8> {
type Output = u8;
fn sub(self, rhs: u8) -> Self::Output {
self.read() - rhs
}
}
impl SubAssign<u8> for Volatile<u8> {
fn sub_assign(&mut self, rhs: u8) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() - rhs);
});
}
}
impl BitAnd<u8> for Volatile<u8> {
type Output = u8;
fn bitand(self, rhs: u8) -> Self::Output {
self.read() & rhs
}
}
impl BitAndAssign<u8> for Volatile<u8> {
fn bitand_assign(&mut self, rhs: u8) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() & rhs);
});
}
}
impl BitOr<u8> for Volatile<u8> {
type Output = u8;
fn bitor(self, rhs: u8) -> Self::Output {
self.read() | rhs
}
}
impl BitOrAssign<u8> for Volatile<u8> {
fn bitor_assign(&mut self, rhs: u8) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() | rhs);
});
}
}
impl BitXor<u8> for Volatile<u8> {
type Output = u8;
fn bitxor(self, rhs: u8) -> Self::Output {
self.read() ^ rhs
}
}
impl BitXorAssign<u8> for Volatile<u8> {
fn bitxor_assign(&mut self, rhs: u8) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() ^ rhs);
});
}
}
impl PartialEq<u8> for Volatile<u8> {
fn eq(&self, other: &u8) -> bool {
self.read() == *other
}
}
impl PartialEq<Volatile<u8>> for Volatile<u8> {
fn eq(&self, other: &Volatile<u8>) -> bool {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.read() == other.read()
})
}
}
impl BoundedIncDec for Volatile<u8> {
fn binc_isolated<const BOUND: usize>(&mut self, _isotoken: Isolated) {
self.write((self.read()+1) % BOUND as u8);
}
fn bdec_isolated<const BOUND: usize>(&mut self, _isotoken: Isolated) {
let val = self.read();
if val > 0 {
self.write(val - 1);
} else {
self.write((BOUND-1) as u8);
}
}
}
impl BoundedMaths<u8,u8> for Volatile<u8> {
fn bsub_isolated<const BOUND: usize>(&self, _isotoken: Isolated, rhs: u8) -> u8 {
let val = self.read();
if val >= rhs {
val - rhs
} else {
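// val < rhs: wrap around, giving (val - rhs) mod BOUND (assumes val < BOUND).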
(BOUND as u8 - (rhs % BOUND as u8)) + val
}
}
}
impl BoundedMaths<&Volatile<u8>,u8> for Volatile<u8> {
fn bsub_isolated<const BOUND: usize>(&self, _isotoken: Isolated, rhs: &Volatile<u8>) -> u8 {
let val = self.read();
let rhs = rhs.read();
if val >= rhs {
val - rhs
} else {
(BOUND as u8 - (rhs % BOUND as u8)) + val
}
}
}
impl BoundedMaths<u8,u8> for u8 {
fn bsub<const BOUND: usize>(&self, rhs: u8) -> u8 {
let val = *self;
if val >= rhs {
val - rhs
} else {
(BOUND as u8 - (rhs % BOUND as u8)) + val
}
}
fn bsub_isolated<const BOUND: usize>(&self, _isotoken: Isolated, rhs: u8) -> u8 {
self.bsub::<BOUND>(rhs)
}
}
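// Volatile<T> implementations for multi-byte integer types. Unlike the u8
// case above, a multi-byte load or store is not a single instruction on an
// 8-bit AVR, so snapshot/read/write additionally wrap the volatile access in
// an interrupt-isolated section to avoid torn values.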
macro_rules! volatile_multibyte_impl {
($innertype:ty) => {
impl From<$innertype> for Volatile<$innertype> {
fn from(val: $innertype) -> Self {
Volatile(val)
}
}
impl Volatile<$innertype> {
pub const fn zero() -> Self {
Self(0 as $innertype)
}
pub fn snapshot(&self) -> $innertype {
unsafe {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
core::ptr::read_volatile(&self.0 as *const $innertype)
})
}
}
pub fn read(&self) -> $innertype {
self.snapshot()
}
pub fn write(&mut self, val: $innertype) {
unsafe {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
core::ptr::write_volatile(&mut self.0 as *mut $innertype, val)
})
}
}
pub fn write_isolated(&mut self, _isotoken: avr_oxide::concurrency::Isolated, val: $innertype) {
unsafe {
core::ptr::write_volatile(&mut self.0 as *mut $innertype, val)
}
}
}
impl Add<$innertype> for Volatile<$innertype> {
type Output = $innertype;
fn add(self, rhs: $innertype) -> Self::Output {
self.read() + rhs
}
}
impl AddAssign<$innertype> for Volatile<$innertype> {
fn add_assign(&mut self, rhs: $innertype) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() + rhs);
});
}
}
impl Sub<$innertype> for Volatile<$innertype> {
type Output = $innertype;
fn sub(self, rhs: $innertype) -> Self::Output {
self.read() - rhs
}
}
impl SubAssign<$innertype> for Volatile<$innertype> {
fn sub_assign(&mut self, rhs: $innertype) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() - rhs);
});
}
}
impl BitAnd<$innertype> for Volatile<$innertype> {
type Output = $innertype;
fn bitand(self, rhs: $innertype) -> Self::Output {
self.read() & rhs
}
}
impl BitAndAssign<$innertype> for Volatile<$innertype> {
fn bitand_assign(&mut self, rhs: $innertype) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() & rhs);
});
}
}
impl BitOr<$innertype> for Volatile<$innertype> {
type Output = $innertype;
fn bitor(self, rhs: $innertype) -> Self::Output {
self.read() | rhs
}
}
impl BitOrAssign<$innertype> for Volatile<$innertype> {
fn bitor_assign(&mut self, rhs: $innertype) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() | rhs);
});
}
}
impl BitXor<$innertype> for Volatile<$innertype> {
type Output = $innertype;
fn bitxor(self, rhs: $innertype) -> Self::Output {
self.read() ^ rhs
}
}
impl BitXorAssign<$innertype> for Volatile<$innertype> {
fn bitxor_assign(&mut self, rhs: $innertype) {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.write(self.read() ^ rhs);
});
}
}
impl PartialEq<$innertype> for Volatile<$innertype> {
fn eq(&self, other: &$innertype) -> bool {
self.read() == *other
}
}
impl PartialEq<Volatile<$innertype>> for Volatile<$innertype> {
fn eq(&self, other: &Volatile<$innertype>) -> bool {
avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
self.read() == other.read()
})
}
}
}
}
volatile_multibyte_impl!(u16);
volatile_multibyte_impl!(u32);
volatile_multibyte_impl!(usize);
#[cfg(test)]
mod tests {
use avr_oxide::util::datatypes::{BitField, BitFieldAccess, BitIndex, BitMaskable, BitRange, BoundedIncDec, BoundedMaths, Volatile, VolatileBitField};
#[test]
fn test_bitindex() {
let bit4 = BitIndex(4);
assert_eq!(bit4.lo_bit_index(), 4);
assert_eq!(bit4.positive_byte_mask(), 0b00010000);
assert_eq!(bit4.negative_byte_mask(), 0b11101111);
}
#[test]
fn test_bitrange() {
let bit2to6 = BitRange::range(2,6);
assert_eq!(bit2to6.lo_bit_index(), 2);
assert_eq!(bit2to6.positive_byte_mask(), 0b01111100);
assert_eq!(bit2to6.negative_byte_mask(), 0b10000011);
}
#[test]
fn test_bitfield() {
let mut all_zero : BitField = BitField::all_clr();
let mut all_one : BitField = BitField::all_set();
assert!(all_zero == BitField::with_initial(0b00000000));
assert!(all_one == BitField::with_initial(0b11111111));
all_zero.set(BitIndex(3));
all_one.set(BitIndex(3));
assert!(all_zero == BitField::with_initial(0b00001000));
assert!(all_one == BitField::with_initial(0b11111111));
all_zero.clr(BitIndex(7));
all_one.clr(BitIndex(7));
assert!(all_zero == BitField::with_initial(0b00001000));
assert!(all_one == BitField::with_initial(0b01111111));
assert!(all_one.is_set(BitIndex(6)));
assert!(all_one.is_clr(BitIndex(7)));
assert!(!all_zero.is_set(BitIndex(0)));
assert!(!all_one.is_clr(BitIndex(1)));
all_zero.set(BitRange::range(5,7));
all_one.clr(BitRange::range_c(2,4));
assert!(all_zero == BitField::with_initial(0b11101000));
assert!(all_one == BitField::with_initial(0b01100011));
assert!(all_zero.get_val(BitRange::range_c(1,5)) == 0b10100);
all_one.set_to(BitRange::range(0,3), 0b1010);
assert!(all_one == BitField::with_initial(0b01101010));
}
#[test]
fn test_volatile_bitfield() {
let mut all_zero : VolatileBitField = VolatileBitField::all_clr();
let mut all_one : VolatileBitField = VolatileBitField::all_set();
assert!(all_zero.snapshot() == BitField::with_initial(0b00000000));
assert!(all_one.snapshot() == BitField::with_initial(0b11111111));
all_zero.set(BitIndex(3));
all_one.set(BitIndex(3));
assert!(all_zero.snapshot() == BitField::with_initial(0b00001000));
assert!(all_one.snapshot() == BitField::with_initial(0b11111111));
all_zero.clr(BitIndex(7));
all_one.clr(BitIndex(7));
assert!(all_zero.snapshot() == BitField::with_initial(0b00001000));
assert!(all_one.snapshot() == BitField::with_initial(0b01111111));
assert!(all_one.is_set(BitIndex(6)));
assert!(all_one.is_clr(BitIndex(7)));
assert!(!all_zero.is_set(BitIndex(0)));
assert!(!all_one.is_clr(BitIndex(1)));
all_zero.set(BitRange::range(5,7));
all_one.clr(BitRange::range_c(2,4));
assert!(all_zero.snapshot() == BitField::with_initial(0b11101000));
assert!(all_one.snapshot() == BitField::with_initial(0b01100011));
assert!(all_zero.get_val(BitRange::range(1,5)) == 0b10100);
all_one.set_to(BitRange::range_c(0,3), 0b1010);
assert!(all_one.snapshot() == BitField::with_initial(0b01101010));
}
#[test]
pub fn test_bitfield_init() {
let some_set = BitField::with_bits_set(&[BitIndex(1),BitIndex(7),BitIndex(4)]);
assert!(some_set == BitField::with_initial(0b10010010));
}
#[test]
pub fn test_volatile_u8() {
let mut val : Volatile<u8> = 12u8.into();
assert_eq!(val.read(), 12u8);
val.write(46);
assert_eq!(val.read(), 46u8);
val += 32u8;
assert_eq!(val.read(), 78);
}
#[test]
fn test_bounded_maths() {
let mut val:Volatile<u8> = 0.into();
assert_eq!(val.bsub::<10>(1u8), 9);
assert_eq!(val.read(), 0);
val.binc::<10>();
assert_eq!(val.read(), 1);
val.binc::<10>();
assert_eq!(val.read(), 2);
val.bdec::<10>();
assert_eq!(val.read(), 1);
val.bdec::<10>();
assert_eq!(val.read(), 0);
val.bdec::<10>();
assert_eq!(val.read(), 9);
val.binc::<10>();
assert_eq!(val.read(), 0);
let zero:Volatile<u8> = 0.into();
let one:Volatile<u8> = 1.into();
let five:Volatile<u8> = 5.into();
let four:Volatile<u8> = 4.into();
assert_eq!(five.bsub::<10>(4), 1);
assert_eq!(four.bsub::<10>(5), 9);
assert_eq!(zero.bsub::<10>(1), 9);
assert_eq!(one.bsub::<10>(0), 1);
}
}