use core::alloc::Layout;
use avr_oxide::hal::generic::cpu::Cpu;
use avr_oxide::util::datatypes::Volatile;
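/// Guard byte written immediately below and above each thread stack; if
/// either copy is ever overwritten, the stack has over- or underflowed.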
const STACKGUARD:u8 = 0b10101010;
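/// Common interface to a thread's stack region, however it is allocated.
/// Guard bytes at each end allow cheap overflow detection, and the fill
/// pattern written at construction gives a high-water-mark "free space"
/// estimate.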
pub(crate) trait ThreadStack {
  /// Address of the highest usable byte of this stack.
  fn get_stack_top(&self) -> usize;

  /// True if both guard bytes are still intact (i.e. the stack has not
  /// grown into them).
  fn is_stack_guard_valid(&self) -> bool;

  /// True if the current stack pointer lies within this stack's bounds.
  fn is_sp_within(&self, isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool;

  /// Halt with a StackOverflow error if the stack pointer has left this
  /// stack's bounds.
  fn halt_if_not_sp_within(&self, isotoken: avr_oxide::concurrency::interrupt::token::Isolated) {
    if !self.is_sp_within(isotoken) {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::StackOverflow);
    }
  }

  /// Halt with a StackOverflow error if either guard byte has been
  /// overwritten.
  fn halt_if_stack_crashed(&self) {
    if !self.is_stack_guard_valid() {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::StackOverflow);
    }
  }

  /// High-water-mark estimate of the number of stack bytes never yet used.
  fn get_stack_free(&self) -> usize;

  /// Total usable size of this stack, in bytes.
  fn get_size(&self) -> usize;
}
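/// A thread stack allocated statically at compile time. `#[repr(C)]`
/// guarantees the field order, so the guard bytes really do bracket the
/// stack space in memory.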
#[repr(C)]
pub struct StaticThreadStack<const STACK_SIZE:usize> {
  loguard: u8,              // Guard byte below the stack space
  stack: [u8; STACK_SIZE],  // The stack space itself
  higuard: u8               // Guard byte above the stack space
}
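/// A thread stack allocated from the heap at runtime. The underlying
/// allocation is `size + 2` bytes: one guard byte at each end, with the
/// usable stack between them.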
pub struct DynamicThreadStack {
  size: usize,    // Usable stack size in bytes (excluding the two guards)
  space: *mut u8  // Heap allocation of size+2 bytes, guards at each end
}
// On AVR targets these symbols are provided by the linker script and
// delimit the interrupt-service-routine stack and its guard bytes.
#[cfg(target_arch="avr")]
extern "C" {
  static mut __ISR_STACK_HIGUARD : Volatile<u8>;
  static mut __ISR_STACK_LOGUARD : Volatile<u8>;
  static mut __ISR_STACK_BOTTOM : u8;
  static mut __ISR_STACK_TOP : u8;
}
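// On non-AVR (host) builds there is no linker-provided ISR stack, so plain
// statics stand in for the symbols above to keep the crate compiling.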
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_HIGUARD : Volatile<u8> = Volatile::<u8>::zero();
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_LOGUARD : Volatile<u8> = Volatile::<u8>::zero();
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_BOTTOM : u8 = 0u8;
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_TOP : u8 = 0u8;
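/// Guard and watermark handling for the kernel's interrupt-service-routine
/// stack, which is laid out by the linker rather than by Rust code.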
pub mod kernel {
  use super::{__ISR_STACK_BOTTOM,__ISR_STACK_TOP,__ISR_STACK_HIGUARD,__ISR_STACK_LOGUARD};

  /// Write the guard bytes and fill the ISR stack with the 0xa5 watermark
  /// pattern. Must be called while the ISR stack is not in use, before
  /// interrupts are enabled.
  pub(crate) unsafe fn initialise() {
    __ISR_STACK_LOGUARD.write(0xde);
    __ISR_STACK_HIGUARD.write(0xad);

    // Fill upwards from the bottom of the stack until we hit the 0xad we
    // just wrote into the high guard.
    let mut offset = 0isize;
    let base = &mut __ISR_STACK_BOTTOM as *mut u8;
    while *base.offset(offset) != 0xad {
      *base.offset(offset) = 0xa5;
      offset += 1;
    }
  }
  /// Halt with a KernelStackOverflow error if either ISR stack guard has
  /// been overwritten.
  pub(crate) fn halt_if_stack_invalid() {
    if !is_stack_guard_valid() {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::KernelStackOverflow);
    }
  }

  /// True if both ISR stack guard bytes still hold the values written by
  /// `initialise()`.
  pub(crate) fn is_stack_guard_valid() -> bool {
    unsafe {
      (__ISR_STACK_LOGUARD.read() == 0xde) && (__ISR_STACK_HIGUARD.read() == 0xad)
    }
  }
  /// Total size of the ISR stack in bytes; TOP is the address of the last
  /// byte, hence the +1.
  pub(crate) fn get_size() -> usize {
    unsafe {
      ((&__ISR_STACK_TOP as *const u8 as usize) - (&__ISR_STACK_BOTTOM as *const u8 as usize))+1
    }
  }

  /// Address of the highest byte of the ISR stack.
  pub(crate) fn get_stack_top() -> usize {
    unsafe {
      &__ISR_STACK_TOP as *const u8 as usize
    }
  }
  /// High-water-mark estimate of unused ISR stack: counts 0xa5 watermark
  /// bytes upwards from the bottom until one has been overwritten.
  pub(crate) fn get_stack_free() -> usize {
    unsafe {
      for count in 0..get_size() {
        if *(&__ISR_STACK_BOTTOM as *const u8).offset(count as isize) != 0xa5 {
          return count;
        }
      }
      get_size()
    }
  }
}
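// Illustrative start-up ordering (a sketch only; the real boot sequence
// lives elsewhere in avr-oxide):
//
//   unsafe { kernel::initialise(); }   // lay down guards + watermark
//   // ...enable interrupts, start scheduling...
//   kernel::halt_if_stack_invalid();   // periodic integrity check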
impl DynamicThreadStack {
  /// Allocate a new stack of `size` usable bytes from the heap, bracketed
  /// by guard bytes and filled with the 0xD5 watermark pattern.
  pub fn new(size: usize) -> Self {
    unsafe {
      // Two extra bytes for the guards at either end.
      let layout = match Layout::from_size_align(size+2, 1) {
        Ok(l) => l,
        Err(_) => avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError)
      };
      let region = avr_oxide::alloc::alloc(layout);

      *(region.offset(0)) = STACKGUARD;
      for offset in 1..=size {
        *(region.offset(offset as isize)) = 0xD5;
      }
      *(region.offset((size+1) as isize)) = STACKGUARD;

      DynamicThreadStack {
        size,
        space: region
      }
    }
  }
}
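// Usage sketch (illustrative only): a heap-backed stack sized at runtime.
// The guards sit at space[0] and space[size+1]; the usable region is the
// `size` bytes between them.
#[allow(dead_code)]
fn dynamic_stack_example() {
  let stack = DynamicThreadStack::new(256);
  debug_assert!(stack.is_stack_guard_valid());
  debug_assert_eq!(stack.get_size(), 256);
  // Dropping `stack` returns the whole region, guards included, to the
  // allocator.
}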
impl Drop for DynamicThreadStack {
  fn drop(&mut self) {
    unsafe {
      // Must recreate exactly the layout used in new(): size+2 bytes.
      let layout = match Layout::from_size_align(self.size+2, 1) {
        Ok(l) => l,
        Err(_) => avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError)
      };
      avr_oxide::alloc::dealloc(self.space, layout);
    }
  }
}
impl<const STACK_SIZE:usize> StaticThreadStack<STACK_SIZE> {
  /// Construct a new stack at compile time, guards in place and the stack
  /// space pre-filled with the 0x55 watermark pattern.
  #[allow(dead_code)]
  pub const fn new() -> Self {
    StaticThreadStack {
      loguard: STACKGUARD,
      stack: [0x55; STACK_SIZE],
      higuard: STACKGUARD
    }
  }
}
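// Usage sketch (illustrative only): a statically allocated 128-byte thread
// stack checked for corruption. halt_if_stack_crashed() never returns if a
// guard byte has been clobbered.
#[allow(dead_code)]
fn static_stack_example() {
  static STACK: StaticThreadStack<128> = StaticThreadStack::new();
  STACK.halt_if_stack_crashed();
  // Watermark scan: how many bytes have never been touched?
  let _free = STACK.get_stack_free();
}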
impl<const STACK_SIZE:usize> ThreadStack for StaticThreadStack<STACK_SIZE> {
  fn get_stack_top(&self) -> usize {
    // With #[repr(C)], loguard is at offset 0 and the stack occupies
    // offsets 1..=STACK_SIZE, so base + STACK_SIZE is the last stack byte.
    self as *const StaticThreadStack<STACK_SIZE> as usize + STACK_SIZE
  }

  #[inline(always)]
  fn is_stack_guard_valid(&self) -> bool {
    self.loguard == STACKGUARD && self.higuard == STACKGUARD
  }

  fn is_sp_within(&self, _isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool {
    // The stack space lies strictly between the two guard bytes.
    let sp = avr_oxide::cpu!().read_sp() as usize;
    sp > (&self.loguard as *const u8 as usize) && sp < (&self.higuard as *const u8 as usize)
  }

  fn get_stack_free(&self) -> usize {
    // Count watermark bytes from the bottom until one has been overwritten.
    for count in 0..STACK_SIZE {
      if self.stack[count] != 0x55 {
        return count;
      }
    }
    STACK_SIZE
  }

  fn get_size(&self) -> usize {
    STACK_SIZE
  }
}
impl ThreadStack for DynamicThreadStack {
  fn get_stack_top(&self) -> usize {
    // Usable bytes occupy offsets 1..=size, so space + size is the last one.
    unsafe {
      self.space.offset(self.size as isize) as usize
    }
  }

  #[inline(always)]
  fn is_stack_guard_valid(&self) -> bool {
    unsafe {
      *self.space.offset(0) == STACKGUARD && *self.space.offset((self.size+1) as isize) == STACKGUARD
    }
  }

  fn is_sp_within(&self, _isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool {
    let sp = avr_oxide::cpu!().read_sp() as usize;
    sp > (self.space as usize) && sp <= ((self.space as usize) + self.size)
  }

  fn get_stack_free(&self) -> usize {
    // Count watermark bytes from the bottom (offset 1, just above the low
    // guard) until one has been overwritten.
    for count in 0..self.size {
      unsafe {
        if *self.space.offset((count+1) as isize) != 0xD5 {
          return count;
        }
      }
    }
    self.size
  }

  fn get_size(&self) -> usize {
    self.size
  }
}