/* scheduler.rs
 *
 * Developed by Tim Walls <tim.walls@snowgoons.com>
 * Copyright (c) All Rights Reserved, Tim Walls
 */
//! The AVRoxide scheduler implementation

// Imports ===================================================================
use core::mem::MaybeUninit;

use avr_oxide::{cpu, OxideResult, thread};
use avr_oxide::deviceconsts::oxide::MAX_THREADS;
use avr_oxide::hal::generic::cpu::{ContextSaveRestore, Cpu};
use avr_oxide::util::datatypes::Volatile;
use avr_oxide::alloc::boxed::Box;
use avr_oxide::hal::generic::cpu::ProcessorContext;
use avr_oxide::concurrency::{interrupt, stack};
use avr_oxide::concurrency::util::{ThreadId, ThreadSet};
use avr_oxide::concurrency::stack::ThreadStack;
use avr_oxide::oserror::OsError;
use avr_oxide::OxideResult::{Ok,Err};

// Declarations ==============================================================
const IDLE_THREAD_STACK_SIZE: usize = 48;

/**
 * The current state of scheduled/running threads.
 */
pub(super) struct SchedulerState<const MT: usize> {
  pub(super) threads: [Option<ThreadContext>; MT],
}
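
// (A ThreadId is simply an index into this fixed-size table; a None slot is
// free, and reap_dead_threads() returns finished threads' slots to None.)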

static mut SCHEDULER: MaybeUninit<SchedulerState<MAX_THREADS>> = MaybeUninit::uninit();

// Address of the next thread context structure to context switch to;
// this is exported from our assembly language routines
#[cfg(target_arch="avr")]
extern "C" {
  static mut __NEXT_THREAD_CONTEXT : Volatile<usize>;
}

#[cfg(not(target_arch="avr"))]
static mut __NEXT_THREAD_CONTEXT : Volatile<usize> = Volatile::<usize>::zero();

/**
 * All the context we store with a thread.
 */
#[repr(C)]
pub struct ThreadContext {
  // WARNING WARNING WARNING WARNING (YES, this is a hack)
  //
  // We put the cpu_context as the first thing in our struct, and also
  // insist on `repr(C)`.
  //
  // Why?  Because then the address of the ProcessorContext and the address
  // of the ThreadContext are the same thing.  This allows for some
  // optimisation when doing things like GetCurrentThread(), because
  // we can just use our global processor context register to get at the
  // thread.
  //
  // It also makes debugging easier, since there is only one address to
  // follow for both.
  //
  // Yes, I am a bad person.
  pub(crate) cpu_context:     ProcessorContext,
  // WARNING WARNING WARNING WARNING ========================================
  pub(crate) guard:           Volatile<u8>,
  pub(crate) state:           ThreadState,
  pub(crate) stack:           Option<Box<dyn ThreadStack>>,
  pub(crate) entrypoint:      Option<Box<dyn FnOnce()->u8>>,
  pub(crate) returncode:      u8,
  pub(crate) waiting_threads: ThreadSet
}
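
// A minimal compile-time sketch (not in the original source) of the layout
// guarantee the hack above relies on: with `repr(C)` the first field sits at
// offset 0, so a `*mut ProcessorContext` taken from the CPU's context
// register is equally a valid `*mut ThreadContext`.  On toolchains where
// `core::mem::offset_of!` is available (Rust 1.77+), it could be asserted
// like this:
//
//   const _: () = assert!(core::mem::offset_of!(ThreadContext, cpu_context) == 0);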


/**
 * The state that a thread can be in
 */
#[derive(PartialEq,Copy,Clone)]
pub enum ThreadState {
  /// This thread can be scheduled for execution
  Schedulable,
  /// This thread can be scheduled for execution, but it is a background
  /// thread that should only be run if nothing else is available.
  BackgroundSchedulable,
  /// This thread is suspended and cannot (currently) be executed until
  /// its state is changed to Schedulable
  Suspended,
  /// This thread has completed execution, but nobody has joined it
  /// and consumed the return value, so we can't replace it yet
  Zombie,
  /// This thread has been killed and may be replaced.
  Dead,
  /// This thread is waiting for a mutex
  BlockedOnMutex,
  /// This thread is waiting to join another thread
  BlockedOnThread,
  /// This thread is waiting for a queue to produce or consume data
  BlockedOnQueue,
  /// This thread is waiting for an EventWait to release it
  BlockedOnEvent,
}
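
// Typical lifecycle, inferred from the states above (a sketch, not a
// normative diagram):
//
//   Schedulable --block--> BlockedOn{Mutex,Thread,Queue,Event} --release--> Schedulable
//   Schedulable --exit---> Zombie --joined--> Dead --reaped--> slot freed (None)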



// Code ======================================================================
/**
 * Get a reference to the scheduler instance.
 *
 * # SAFETY
 * It's up to the caller to ensure that the scheduler was actually initialised
 * before calling this.
 */
pub(super) unsafe fn instance() -> &'static mut SchedulerState<MAX_THREADS> {
  SCHEDULER.assume_init_mut()
}

/**
 * The idle thread.  This literally just exists to soak up CPU cycles when
 * nothing else can be scheduled.  (Actually, rather than soak them up, it
 * will try to `sleep` as much as possible.)
 */
fn idle_thread() -> u8 {
  interrupt::isolated(|isotoken|{
    set_current_thread_state(isotoken, ThreadState::BackgroundSchedulable);
  });
  loop {
    unsafe {
      thread::yield_now();

      // Only sleep waiting for an interrupt if we're reasonably sure
      // interrupts that might wake us up are happening :-).
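      // (`sbic` skips the next instruction when the given I/O-register bit
      // is clear, so the `sleep` only executes while the preemption flag is
      // set; `cbi` then clears the flag so we re-check on the next pass.)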
      core::arch::asm!(
      "    sbic {context_flags_reg},{flag_preemption}",
      "    sleep",
      "    cbi {context_flags_reg},{flag_preemption}",
      context_flags_reg = const(avr_oxide::hardware::cpu::cpuregs::IOADR_CONTEXT_FLAGS),
      flag_preemption = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_PREEMPTION),
      );
    }
  }
}

impl ThreadState {
  /// Modify the thread state.  This method includes 'business rules'
  /// (compiled in with the `runtime_checks` feature) that ensure only sane
  /// changes of state are permitted (e.g. a thread cannot enter a blocked
  /// state from within an ISR, and a background thread's state never
  /// changes once set).
  fn change(&mut self, new_state: ThreadState) {
    // Why do we write this in such an ugly way?  Because of code generation
    // errors from LLVM/AVR when we use the more obvious 'match' based
    // approach.
    //
    // Sigh.

    // Check not trying to block from within an ISR
    #[cfg(feature="runtime_checks")]
    if cpu!().in_isr() {
      if new_state == Self::BlockedOnMutex {
        avr_oxide::oserror::halt(OsError::BadThreadState);
      }
      if new_state == Self::BlockedOnQueue {
        avr_oxide::oserror::halt(OsError::BadThreadState);
      }
      if new_state == Self::BlockedOnEvent {
        avr_oxide::oserror::halt(OsError::BadThreadState);
      }
      if new_state == Self::BlockedOnThread {
        avr_oxide::oserror::halt(OsError::BadThreadState);
      }
    }

    // A background thread's state must never change at all (so in
    // particular it can never die or block)
    #[cfg(feature="runtime_checks")]
    if *self == Self::BackgroundSchedulable {
      avr_oxide::oserror::halt(OsError::BadThreadState);
    }

    *self = new_state;
  }

  /// This thread can be reaped
  fn can_be_reaped(&self) -> bool {
    *self == Self::Dead
  }

  pub(crate) fn to_debug_str(&self) -> &str {
    match self {
      ThreadState::Schedulable => "RDY",
      ThreadState::BackgroundSchedulable => "bgd",
      ThreadState::Suspended => "Sus",
      ThreadState::Zombie => "Zby",
      ThreadState::Dead => "XXX",
      ThreadState::BlockedOnMutex => "B:M",
      ThreadState::BlockedOnThread => "B:T",
      ThreadState::BlockedOnQueue => "B:Q",
      ThreadState::BlockedOnEvent => "B:E",
    }
  }
}

/**
 * Initialise the concurrency system.  Must be called once and only once,
 * and before interrupts have been enabled.
 */
pub(crate) fn initialise() {
  unsafe {
    const NOTHREAD: Option<ThreadContext> = None;

    core::ptr::write(SCHEDULER.as_mut_ptr(),
                     SchedulerState {
                       threads: [NOTHREAD; MAX_THREADS],
                     });

    stack::kernel::initialise();
  }

  thread::Builder::new().stack_size(IDLE_THREAD_STACK_SIZE).spawn(idle_thread);
}
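
// A sketch of the intended boot order (hypothetical call sites, not part of
// this file): initialise the scheduler before interrupts are enabled, then
// hand the CPU to the first thread:
//
//   initialise();                        // build thread table, spawn idle thread
//   // ...spawn application threads...
//   unsafe { restore_first_thread() };   // loads thread 0's context; never returns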



pub(crate) unsafe fn get_current_thread(isotoken: interrupt::token::Isolated) -> &'static mut ThreadContext {
  // SAFETY: relies on `cpu_context` being the first field of the `repr(C)`
  // ThreadContext (see the warning on that struct), so the processor
  // context pointer is equally a pointer to the enclosing ThreadContext.
  core::mem::transmute(cpu!().get_processor_context(isotoken))
}

pub(crate) unsafe fn get_thread_by_id(thread_id: ThreadId) -> &'static mut ThreadContext {
  let scheduler = SCHEDULER.assume_init_mut();

  match &mut scheduler.threads[thread_id] {
    None => {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    },
    Some(thread) => {
      thread
    }
  }
}

pub(crate) unsafe fn try_get_thread_by_id(thread_id: ThreadId) -> &'static mut Option<ThreadContext> {
  let scheduler = SCHEDULER.assume_init_mut();

  &mut scheduler.threads[thread_id]
}

pub fn current_thread_id(isotoken: interrupt::token::Isolated) -> ThreadId {
  unsafe {
    cpu!().get_processor_context(isotoken).tid
  }
}

pub(crate) fn set_current_thread_state(isotoken: interrupt::token::Isolated, new_state: ThreadState) {
  unsafe {
    get_current_thread(isotoken).state.change(new_state);
  }
}

pub(crate) fn try_set_thread_state(_isotoken: interrupt::token::Isolated, thread_id: ThreadId, new_state: ThreadState) -> OxideResult<(),OsError> {
  unsafe {
    match try_get_thread_by_id(thread_id) {
      Some(thread) => {
        thread.state.change(new_state);
        Ok(())
      },
      None => {
        Err(OsError::NoSchedulableThreads)
      }
    }
  }
}

pub(crate) fn set_thread_state(_isotoken: interrupt::token::Isolated, thread_id: ThreadId, new_state: ThreadState) {
  unsafe {
    get_thread_by_id(thread_id).state.change(new_state);
  }
}

pub(crate) fn release_all_threads_and_clear(isotoken: interrupt::token::Isolated, threads: &mut ThreadSet) {
  threads.do_each_consuming(isotoken, |isotoken, thread_id|{
    let _ = try_set_thread_state(isotoken, thread_id, ThreadState::Schedulable);
    true
  });
}

impl ThreadContext {
  /// Sanity check: halt the system if this context's guard byte no longer
  /// holds the expected 0xf0 magic, which indicates something has written
  /// over the thread context.
  pub(crate) fn halt_if_invalid(&self) {
    if self.guard.read() != 0xf0 {
      avr_oxide::oserror::halt(OsError::KernelGuardCrashed);
    }
  }
}

/**
 * This is how we actually start a thread; we need to enable interrupts
 * before calling the thread function itself; after the thread completes,
 * we also need to collect the return value and update the thread status.
 */
pub(super) unsafe extern "C" fn thread_entrypoint() -> () {
  // When I arrive here, it's after falling out of a "restore_thread_context".
  // I need to do a little prep-work here to make sure all is well - execute
  // a `reti` instruction etc. to get the interrupt system back in play
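  // (The `call 1f`/`reti` pair below fakes a return-from-interrupt: `call`
  // pushes the address of the following `jmp 2f`, and `reti` pops it while
  // setting the I flag, so execution continues at `2:` with interrupts
  // back in play.)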
  #[cfg(target_arch="avr")]
  core::arch::asm!(
    "      sbic {context_flags_reg},{flag_enableints}", // tells us to enable interrupts
    "      sei",
    "      sbic {context_flags_reg},{flag_reti}", // tells us we need to execute a reti to
                                        // let the '4809 we exited an ISR
    "      call 1f",
    "      jmp 2f",
    "1:", "reti",
    "2:", "nop",
    context_flags_reg = const(avr_oxide::hardware::cpu::cpuregs::IOADR_CONTEXT_FLAGS),
    flag_enableints = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_ENABLEINTS),
    flag_reti = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_RETI)
  );

  let thread = avr_oxide::concurrency::interrupt::isolated(|isotoken|{
    get_current_thread(isotoken)
  });

  let code = thread.entrypoint.take().unwrap();
  let res = (code)();

  avr_oxide::concurrency::interrupt::isolated(|isotoken|{
    thread.returncode = res;
    thread.state.change(ThreadState::Zombie);

    // Dispose of the stack
    core::mem::drop(thread.stack.take());

    // Note that this is a little risky, in that I'm actually sitting on
    // this very stack as I speak.  But, it's OK because for anything to
    // corrupt the stack (which is on the heap) it would need to alloc(),
    // and since alloc() only happens in userland, it can't happen until
    // threads have been switched - by which time, I don't care about this
    // stack any more.

    // Wake up all the threads that are waiting on this thread
    // (There should only be one of course :-).)
    release_all_threads_and_clear(isotoken, &mut thread.waiting_threads);
  });
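  // The thread is now a Zombie and its stack has been dropped, so we must
  // never return from here: switch away and let the scheduler pick the
  // next runnable thread.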
  userland_schedule_and_switch();
}


/// Check all threads are valid, and halt if not
#[inline(never)] // Workaround for LLVM bugs
unsafe fn check_all_threads_valid() {
  // Check all threads are valid (this is a sanity/safety check)
  for i in 0..MAX_THREADS {
    if let Some(thread) = &instance().threads[i] {
      thread.halt_if_invalid();
      // Also check the stack wasn't crashed
      if let Some(stack) = &thread.stack {
        stack.halt_if_stack_crashed();
      }
    }
  }
}

/**
 * Schedule the next thread for execution.  That simply means pointing
 * `__NEXT_THREAD_CONTEXT` at the context of the next schedulable thread.
 * The thread will begin execution when `restore_thread_context` is called
 * (typically on the return from whatever interrupt service routine we're
 * in).
 */
pub(crate) fn schedule_next_thread(isotoken: avr_oxide::concurrency::interrupt::token::Isolated) {
  unsafe {
    #[cfg(feature="runtime_checks")]
    check_all_threads_valid();

    let mut candidate = current_thread_id(isotoken);

    // Scan the table round-robin, starting at the slot after the current
    // thread, looking for a 'regular' priority thread to schedule
    for _i in 0..MAX_THREADS {
      candidate = (candidate + 1) % MAX_THREADS;

      if let Some(thread_context) = &mut instance().threads[candidate] {
        if thread_context.state == ThreadState::Schedulable {
          // We found a schedulable thread - yay
          __NEXT_THREAD_CONTEXT.write_isolated(isotoken, &mut thread_context.cpu_context as *mut ProcessorContext as usize);
          return;
        }
      }
    }

    // OK, none of our regular priority threads was schedulable.  Let's try
    // again looking for a background thread.
    for _i in 0..MAX_THREADS {
      candidate = (candidate + 1) % MAX_THREADS;
      if let Some(thread_context) = &mut instance().threads[candidate] {
        if thread_context.state == ThreadState::BackgroundSchedulable {
          // We found a schedulable thread - yay
          __NEXT_THREAD_CONTEXT.write_isolated(isotoken, &mut thread_context.cpu_context as *mut ProcessorContext as usize);
          return;
        }
      }
    }

    // If I got this far, nothing was schedulable.  This ought to be impossible
    // (we should always have an Idle thread at least.)
    avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NoSchedulableThreads);
  }
}

/**
 * Clean up any old, dead threads, freeing up the memory allocated to their
 * thread context/stack.
 */
pub(crate) fn reap_dead_threads(_isotoken: interrupt::token::Isolated) {
  unsafe {
    let scheduler = SCHEDULER.assume_init_mut();

    for i in 0..MAX_THREADS {
      match &scheduler.threads[i] {
        None => {},
        Some(thread) => {
          if thread.state.can_be_reaped() {
            // `DON'T THINK OF IT AS DYING`
            let farewell = scheduler.threads[i].take();
            // `THINK OF IT AS LEAVING EARLY TO AVOID THE RUSH`
            core::mem::drop(farewell);
          }
        }
      }
    }
  }
}

/**
 * Save the current thread's context and switch to the next; called from
 * userland (rather than from an ISR.)
 */
pub(crate) unsafe extern "C" fn userland_schedule_and_switch() {
  interrupt::isolated(|isotoken|{
    schedule_next_thread(isotoken);
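
    // `save_thread_context` behaves rather like setjmp: control arrives
    // back here twice.  The first time the RESTORED flag is clear, and we
    // jump on to `restore_thread_context` to switch threads; when this
    // thread is eventually restored, the flag is set and we skip to label
    // 1 to make our way back to userland.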

    #[cfg(target_arch="avr")]
    core::arch::asm!(
    "     call save_thread_context",
    "     sbic {context_flags_reg},{flag_restored}",
    "     jmp 1f",
    "     jmp  restore_thread_context",
    "1:", // At this point I've been restored.  It's possible I'm actually in
          // an ISR now, so I use the context flags to tell me how to
          // get back to userland
    "     sbic {context_flags_reg},{flag_enableints}", // Tells us to enable ints
    "     sei",
    "     sbic {context_flags_reg},{flag_reti}", // Tells us to reti
    "     call 1f",
    "     jmp 2f",
    "1:","reti",
    "2:","nop",
    context_flags_reg = const(avr_oxide::hardware::cpu::cpuregs::IOADR_CONTEXT_FLAGS),
    flag_enableints = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_ENABLEINTS),
    flag_reti = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_RETI),
    flag_restored = const(avr_oxide::hal::generic::cpu::CONTEXT_FLAG_RESTORED),
    options(preserves_flags)
    );
  });
}


/**
 * Call me only once ;).  I trigger the first thread execution; I do this
 * by loading its context and then returning from interrupt.  (No, I know
 * I'm not in an interrupt - but this has the effect I want, which is to
 * enable interrupts once the new thread begins executing.)
 *
 * The restore_thread_context function puts the thread's PC on the stack,
 * so the reti will return to begin execution from the beginning of the
 * thread.
 */
pub(crate) unsafe fn restore_first_thread() -> ! {
  // Thread 0 should always exist - it's the idle thread I created in
  // the initialise() method
  if let Some(thread_context) = &mut instance().threads[0] {
    __NEXT_THREAD_CONTEXT.write(&mut thread_context.cpu_context as *mut ProcessorContext as usize);

    #[cfg(target_arch="avr")]
    core::arch::asm!(
    "  jmp restore_thread_context",
    // This function will 'return' to the beginning of our thread
    options(noreturn));
    #[cfg(not(target_arch="avr"))]
    core::arch::asm!("nop",options(noreturn));
  } else {
    avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NoSchedulableThreads);
  }
}

// Tests =====================================================================