/* stack.rs
 *
 * Developed by Tim Walls <tim.walls@snowgoons.com>
 * Copyright (c) All Rights Reserved, Tim Walls
 */
//! Implementations of stack allocation for threads.

// Imports ===================================================================

use core::alloc::Layout;
use avr_oxide::hal::generic::cpu::Cpu;
use avr_oxide::util::datatypes::Volatile;

// Declarations ==============================================================
const STACKGUARD:u8 = 0b10101010;

/// A stack that can be used by a thread
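///
/// A minimal usage sketch of the check methods (the `scheduler_check`
/// wrapper is an illustrative assumption; `Isolated` here refers to
/// `avr_oxide::concurrency::interrupt::token::Isolated`):
///
/// ```ignore
/// fn scheduler_check<S: ThreadStack>(stack: &S, isotoken: Isolated) {
///   // Halt with OsError::StackOverflow if either check fails.
///   stack.halt_if_stack_crashed();
///   stack.halt_if_not_sp_within(isotoken);
/// }
/// ```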
pub(crate) trait ThreadStack {
  /// Return the address of the top byte of the stack
  fn get_stack_top(&self) -> usize;

  /// Check if the stack guards have been corrupted
  fn is_stack_guard_valid(&self) -> bool;

  /// Check if the stack pointer is currently within the bounds of this stack.
  fn is_sp_within(&self, isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool;

  /// Halt if the stack pointer is not within the bounds of this stack.
  fn halt_if_not_sp_within(&self, isotoken: avr_oxide::concurrency::interrupt::token::Isolated) {
    if !self.is_sp_within(isotoken) {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::StackOverflow);
    }
  }

  /// Halt if stack guards have been corrupted
  fn halt_if_stack_crashed(&self) {
    if !self.is_stack_guard_valid() {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::StackOverflow);
    }
  }

  /// Indicate how much of the stack has not been written to yet
  fn get_stack_free(&self) -> usize;

  /// Return the size of the stack
  fn get_size(&self) -> usize;
}


/**
 * A simple statically-allocated stack
 */
#[repr(C)]
pub struct StaticThreadStack<const STACK_SIZE:usize> {
  loguard: u8,
  stack: [u8; STACK_SIZE],
  higuard: u8
}

/**
 * A dynamically allocated stack that lives on the heap
 */
pub struct DynamicThreadStack {
  size: usize,
  space: *mut u8
}

#[cfg(target_arch="avr")]
extern "C" {
  static mut __ISR_STACK_HIGUARD   : Volatile<u8>;
  static mut __ISR_STACK_LOGUARD   : Volatile<u8>;
  static mut __ISR_STACK_BOTTOM    : u8;
  static mut __ISR_STACK_TOP       : u8;
}
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_HIGUARD : Volatile<u8> = Volatile::<u8>::zero();
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_LOGUARD : Volatile<u8> = Volatile::<u8>::zero();
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_BOTTOM  : u8 = 0u8;
#[cfg(not(target_arch="avr"))]
static mut __ISR_STACK_TOP     : u8 = 0u8;

// Code ======================================================================

pub mod kernel {
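  //! Kernel/ISR stack bookkeeping.
  //!
  //! A hedged sketch of the intended call sequence (these call sites are
  //! illustrative assumptions, not taken from elsewhere in the crate):
  //!
  //! ```ignore
  //! unsafe { kernel::initialise() };      // once, early during boot
  //! kernel::halt_if_stack_invalid();      // periodically, e.g. each scheduler tick
  //! let free = kernel::get_stack_free();  // optional diagnostics
  //! ```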
  use super::{__ISR_STACK_BOTTOM,__ISR_STACK_TOP,__ISR_STACK_HIGUARD,__ISR_STACK_LOGUARD};

  pub(crate) unsafe fn initialise() {
    // Set guards on the kernel/ISR stack
    __ISR_STACK_LOGUARD.write(0xde);
    __ISR_STACK_HIGUARD.write(0xad);

    // Fill the stack space with a canary value so we can later estimate how
    // much of the stack has been used; the fill stops when it reaches the
    // 0xad high guard byte written above.
    let mut offset = 0isize;
    let base = &mut __ISR_STACK_BOTTOM as *mut u8;
    while *base.offset(offset) != 0xad {
      *base.offset(offset) = 0xa5;
      offset += 1;
    }
  }

  pub(crate) fn halt_if_stack_invalid() {
    if !is_stack_guard_valid()  {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::KernelStackOverflow);
    }
  }

  pub(crate) fn is_stack_guard_valid() -> bool {
    unsafe {
      (__ISR_STACK_LOGUARD.read() == 0xde) && (__ISR_STACK_HIGUARD.read() == 0xad)
    }
  }

  pub(crate) fn get_size() -> usize {
    unsafe {
      ((&__ISR_STACK_TOP as *const u8 as usize) - (&__ISR_STACK_BOTTOM as *const u8 as usize))+1
    }
  }

  pub(crate) fn get_stack_top() -> usize {
    unsafe {
      &__ISR_STACK_TOP as *const u8 as usize
    }
  }

  pub(crate) fn get_stack_free() -> usize {
    unsafe {
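      // Scan upwards from the bottom of the ISR stack, counting bytes that
      // still hold the 0xa5 canary written by initialise(); since the stack
      // grows downwards, these bytes have never been touched.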
      for count in 0..get_size() {
        if *(&__ISR_STACK_BOTTOM as *const u8).offset(count as isize) != 0xa5 {
          return count;
        }
      }
      get_size()
    }
  }
}

impl DynamicThreadStack {
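  /// Allocate a new heap-backed stack with `size` usable bytes, bracketed
  /// by a guard byte at each end.
  ///
  /// A minimal usage sketch (the 128-byte size is an illustrative choice,
  /// not a crate default):
  ///
  /// ```ignore
  /// let stack = DynamicThreadStack::new(128);
  /// assert!(stack.is_stack_guard_valid());
  /// ```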
  pub fn new(size: usize) -> Self {
    unsafe {
      let layout = match Layout::from_size_align(size+2, 1) {
        Ok(l) => l,
        Err(_) => avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError)
      };

      let region = avr_oxide::alloc::alloc(layout);

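      // Memory layout of the allocated region: guard byte at offset 0,
      // canary-filled stack bytes at offsets 1..=size, guard byte at
      // offset size+1.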
      *(region.offset(0)) = STACKGUARD;
      for offset in 1..=size {
        *(region.offset(offset as isize)) = 0xD5;
      }
      *(region.offset((size+1) as isize)) = STACKGUARD;

      DynamicThreadStack {
        size,
        space: region
      }
    }
  }
}

impl Drop for DynamicThreadStack {
  /// Take care to deallocate the manually allocated RAM block when we are
  /// dropped.
  fn drop(&mut self) {
    unsafe {
      let layout = match Layout::from_size_align(self.size+2, 1) {
        Ok(l) => l,
        Err(_) => avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError)
      };

      avr_oxide::alloc::dealloc(self.space, layout);
    }
  }
}

impl<const STACK_SIZE:usize> StaticThreadStack<STACK_SIZE> {
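  /// Construct a new stack with the guard bytes set and the stack space
  /// filled with the 0x55 canary.
  ///
  /// A hedged declaration sketch (the `IDLE_STACK` name and 256-byte size
  /// are illustrative assumptions):
  ///
  /// ```ignore
  /// static IDLE_STACK: StaticThreadStack<256> = StaticThreadStack::new();
  /// ```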
  #[allow(dead_code)]
  pub const fn new() -> Self {
    StaticThreadStack {
      loguard: STACKGUARD,
      stack:   [0x55; STACK_SIZE],
      higuard: STACKGUARD
    }
  }
}

impl<const STACK_SIZE:usize> ThreadStack for StaticThreadStack<STACK_SIZE> {
  /// Return the address of the top byte of the stack
  fn get_stack_top(&self) -> usize {
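    // With #[repr(C)] the loguard byte is at offset 0 and the stack array
    // starts at offset 1, so the last byte of the stack array sits at
    // offset STACK_SIZE from the struct's base address.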
    self as *const StaticThreadStack<STACK_SIZE> as usize + STACK_SIZE
  }

  /// Check if the stack guards have been corrupted
  #[inline(always)]
  fn is_stack_guard_valid(&self) -> bool {
    self.loguard == STACKGUARD && self.higuard == STACKGUARD
  }

  fn is_sp_within(&self, _isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool {
    let sp = avr_oxide::cpu!().read_sp() as usize;

    sp > (&self.loguard as *const u8 as usize) && sp < (&self.higuard as *const u8 as usize)
  }

  fn get_stack_free(&self) -> usize {
    for count in 0..STACK_SIZE {
      if self.stack[count] != 0x55 {
        return count;
      }
    }
    STACK_SIZE
  }

  fn get_size(&self) -> usize {
    STACK_SIZE
  }
}

impl ThreadStack for DynamicThreadStack {
  fn get_stack_top(&self) -> usize {
    unsafe {
      // Note we offset by size, not size-1, because of the bottom guard
      // byte at location 0
      self.space.offset(self.size as isize) as usize
    }
  }

  #[inline(always)]
  fn is_stack_guard_valid(&self) -> bool {
    unsafe {
      *self.space.offset(0) == STACKGUARD && *self.space.offset((self.size+1) as isize) == STACKGUARD
    }
  }

  fn is_sp_within(&self, _isotoken: avr_oxide::concurrency::interrupt::token::Isolated) -> bool {
    let sp = avr_oxide::cpu!().read_sp() as usize;

    sp > (self.space as usize) && sp <= ((self.space as usize) + self.size)
  }

  fn get_stack_free(&self) -> usize {
    for count in 0..self.size {
      unsafe {
        if *self.space.offset((count+1) as isize) != 0xD5 {
          return count;
        }
      }
    }
    self.size
  }

  fn get_size(&self) -> usize {
    self.size
  }
}