/* alloc.rs
 *
 * Developed by Tim Walls <tim.walls@snowgoons.com>
 * Copyright (c) All Rights Reserved, Tim Walls
 */
//! A simple dynamic allocator implementation for our embedded devices.
//!
//! Based on a First-Fit algorithm as described by:
//!
//! R. P. Brent. 1989. Efficient implementation of the first-fit strategy for
//! dynamic storage allocation.
//! ACM Trans. Program. Lang. Syst. 11, 3 (July 1989), 388–403.
//! DOI:<https://doi.org/10.1145/65979.65981>
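//!
//! A minimal usage sketch (assuming the module is reachable as
//! `avr_oxide::alloc`; the exact re-export paths may differ):
//!
//! ```ignore
//! // The allocator must be initialised once, before the first allocation:
//! avr_oxide::alloc::initialise();
//!
//! // Thereafter the usual alloc-crate types work as normal:
//! let answer = avr_oxide::alloc::boxed::Box::new(42u8);
//! ```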

// Imports ===================================================================
use avr_oxide::deviceconsts::memory;

use core::alloc::{GlobalAlloc, Layout};
use core::cell::UnsafeCell;

#[cfg(target_arch="avr")]
extern crate alloc;
#[cfg(target_arch="avr")]
pub use alloc::vec;
#[cfg(target_arch="avr")]
pub use alloc::boxed;
#[cfg(target_arch="avr")]
pub use alloc::rc;
#[cfg(target_arch="avr")]
pub use alloc::string;


#[cfg(not(target_arch="avr"))]
pub use std::boxed;
#[cfg(not(target_arch="avr"))]
pub use std::vec;
#[cfg(not(target_arch="avr"))]
pub use std::rc;


// Declarations ==============================================================

// Make sure we are word aligned (thus our heap will be as well), and also
// that we have a predictable data representation (since we'll be doing
// pointer mangling into/out of our heap array.)
#[repr(C,align(2))]
struct FirstFitHeap<const SEGS: usize, const S2: usize> {
  // The heap, viewed as 16-bit words.  v[p] is the control word of the block
  // starting at p: positive = free, negative = allocated, magnitude = block
  // length in words including the control word itself
  v:  &'static mut [i16],
  // Index of the first block starting in each segment
  pa: [u16; SEGS],
  // Segment tree recording the largest free block in each segment
  st: [i16; S2],
  // Number of segments currently in use
  s:  usize,
  // Number of heap words covered by each segment
  words_per_seg: usize
}


struct GlobalAllocator<const S:usize, const S2:usize> {
  heap: UnsafeCell<FirstFitHeap<S,S2>>
}

// If this is enabled when we run unit tests it will replace the default
// allocator /for the unit test environment/, and our unit tests will then
// bomb out because our allocator isn't really designed to run on the host
// environment (and will not have been initialised.)
//
#[cfg(target_arch="avr")]
#[global_allocator]
static mut GLOBAL: GlobalAllocator::<{memory::alloc::SMAX}, {memory::alloc::SMAX * 2}> = GlobalAllocator {
  heap: UnsafeCell::new(FirstFitHeap::new())
};

// Code ======================================================================
#[cfg(target_arch="avr")]
#[alloc_error_handler]
fn _alloc_error(_: core::alloc::Layout) -> ! {
  avr_oxide::oserror::halt(avr_oxide::oserror::OsError::OutOfMemory);
}

/// Allocate memory using the AVRoxide global allocator implementation
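///
/// A minimal sketch of direct use (most code should instead just use the
/// `alloc`-crate types once `initialise()` has been called):
///
/// ```ignore
/// let layout = Layout::from_size_align(16, 2).unwrap();
/// let ptr = unsafe { alloc(layout) };
/// assert!(!ptr.is_null());
/// unsafe { dealloc(ptr, layout) };
/// ```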
#[cfg(target_arch="avr")]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
  GLOBAL.alloc(layout)
}

/// Allocate memory; on non-AVR targets this simply delegates to the standard library allocator
#[cfg(not(target_arch="avr"))]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
  std::alloc::alloc(layout)
}


/// Deallocate memory allocated using the AVRoxide global allocator implementation
#[cfg(target_arch="avr")]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
  GLOBAL.dealloc(ptr, layout)
}

/// Deallocate memory; on non-AVR targets this simply delegates to the standard library allocator
#[cfg(not(target_arch="avr"))]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
  std::alloc::dealloc(ptr, layout)
}


/**
 * Must be called to initialise the global allocator before any allocations
 * are attempted.  We will use all remaining memory on the device (after
 * the data/BSS segments) as our heap.  We calculate this using the device's
 * SRAM_END constant and the `__heap_start` symbol exported by the
 * linker.
 */
#[cfg(target_arch="avr")]
pub fn initialise() {
  unsafe {
    let heap = avr_oxide::panic_if_none!(GLOBAL.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    let mut heap_start_hi:u8;
    let mut heap_start_lo:u8;
    core::arch::asm!{
      "ldi {}, hi8(__heap_start)",
      "ldi {}, lo8(__heap_start)",
      out(reg) heap_start_hi,
      out(reg) heap_start_lo
    }

    let heap_start = (heap_start_hi as usize) << 8 | heap_start_lo as usize;

    let heap_size = avr_oxide::deviceconsts::memory::SRAM_END as isize - heap_start as isize;

    // Fail if there is not enough memory for a usefully functioning heap
    if heap_start == 0 || heap_size < avr_oxide::deviceconsts::memory::alloc::MIN_HEAPSIZE as isize {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NotEnoughRam);
    }
    heap.set_heap_location(heap_start as *mut u8, heap_size as usize);
    heap.init();
  }
}

/**
 * Implementation of the Rust GlobalAlloc trait for our First-Fit allocator,
 * allowing it to be used as the Rust global allocator.
 */
unsafe impl<const S:usize, const S2:usize> GlobalAlloc for GlobalAllocator<S,S2> {
  unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    let heap = avr_oxide::panic_if_none!(self.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    // Calculate the size we need to allocate for the 'worst case' amount of
    // padding needed to achieve the requested alignment
    let align_words = match layout.align() {
      0 => 0,
      1 => 0,
      _other => (layout.align()/2)-1
    };
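    // Example: a request for 8-byte alignment from our 2-byte-aligned heap
    // may need up to 6 bytes (3 words) of padding - exactly (align/2)-1 words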

    let size_words = ((layout.size() + 1) / 2) + align_words;

    // Allocate, including space for padding if needed
    let ptr = avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
      match heap.bl_new(size_words){
        0 => {
          core::ptr::null_mut()
        },
        index => {
          core::mem::transmute(&heap.v[index])
        }
      }
    });

    // Note that while we are updating the heap data directly here, we're
    // only doing it 'inside' the block of memory we just allocated, so it's
    // safe to do so outside the mutual exclusion lock.

    // Now modify the returned pointer so it matches the requested alignment
    match (ptr as usize) % layout.align() {
      0 => ptr, // We already have the requested alignment
      misalignment => {
        // We need to pad.  That means we need to set a flag, a positive number
        // in the word immediately before our returned pointer that will tell
        // us where to find the *actual* starting index.
        // (This works because normally that would be the control word,
        // with a *negative* pointer indicating the allocated size.  So if
        // that field is positive, we know we are not at the beginning of
        // our block)
        let padding_needed = (layout.align() - misalignment) as isize;

        let returned_ptr = ptr.offset(padding_needed);
        let flag = returned_ptr.offset(-2) as *mut i16; // -2 because 'word'
        *flag = (padding_needed as i16)/2; // Store as words not bytes
        // Note that the spec for layout tells us alignment is *always* a
        // power of two, so this is 'safe'

        returned_ptr
      }
    }
  }

  unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
    let heap = avr_oxide::panic_if_none!(self.heap.get().as_mut(), avr_oxide::oserror::OsError::InternalError);

    let index0:usize = core::mem::transmute(&heap.v[0]);
    let mut index = ((ptr as usize) - index0)/2;

    avr_oxide::concurrency::interrupt::isolated(|_isotoken|{
      // Check if this allocation actually contains alignment padding,
      // and if so adjust the pointer
      if heap.v[index-1] > 0 {
        index -= heap.v[index-1] as usize;
      }

      // Now we can dispose knowing that the index is the start of the allocated
      // region.

      heap.bl_dispose(index);
    });
  }
}
impl<const SEGS: usize, const S2: usize> FirstFitHeap<SEGS,S2> {
  /**
   * Create a new instance of the first-fit heap structure.  You must call
   * the init() method before first making any allocations from it.
   */
  const fn new() -> Self {
    FirstFitHeap {
      // These are just placeholder values so that this can be a const fn;
      // the real setup happens in set_heap_location() and init()
      v:  &mut [],
      pa: [0; SEGS],
      st: [0; S2],
      s: 0,
      words_per_seg: 0
    }
  }

  /**
   * Set the location of the heap to an address in memory, of the given size
   * in bytes.  Must be called before `heap.init()`
   */
  fn set_heap_location(&mut self, memory: *mut u8, size_bytes: usize){
    unsafe {
      self.v = core::slice::from_raw_parts_mut(memory as *mut i16, size_bytes/2)
    }
  }

  /**
   * Perform such initialisation as is required to make the heap ready for
   * allocations.  Must be called before any calls to `bl_new()` etc.
   */
  fn init(&mut self) {
    let heapsize_words = self.v.len() as i16;

    if heapsize_words == 0 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::NotEnoughRam);
    }

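    // Start with a single segment covering the whole heap, containing one
    // free block that spans all of it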
    self.s     = 1;
    self.st[1] = heapsize_words;
    self.pa[0] = 0;
    self.v[0]  = heapsize_words;
    self.words_per_seg = heapsize_words as usize/SEGS;

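    // Allocate a dummy zero-length first block, so that every real block
    // always has a predecessor (see bl_pred())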
    self.bl_new(0);
  }

  /**
   * Return the segment (as a segment-tree leaf index) which contains the given heap address.
   */
  fn bl_seg(&self, addr: usize) -> usize {
    self.s + (addr/self.words_per_seg)
  }

  /**
   * Double the number of segments in use.  Assumes the current number is
   * at most SEGS/2.
   */
  #[optimize(speed)]
  fn bl_double(&mut self) {
    let heapsize_max = self.v.len() - 1;

    if self.s > SEGS/2 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    }

    for i in self.s ..= 2*self.s-1 {
      self.pa[i] = heapsize_max as u16;
    }
    let mut k = self.s;

    while k > 0 {
      for i in 0 ..= k-1 {
        self.st[2*k+i] = self.st[k+i];
        self.st[3*k+i] = 0;
      }

      k /= 2;
    }

    self.st[1] = self.st[2];
    self.s *= 2;
  }

  /**
   * Do the housekeeping necessary if a block with control word `self.v[addr]`
   * will be allocated or merged with a block on its left in a different
   * segment.
   */
  #[optimize(speed)]
  fn bl_fix_left(&mut self, addr: usize) {
    let heapsize_max = self.v.len() - 1;

    let mut j = self.bl_seg(addr);
    if (self.v[addr] <= 0) || (self.st[j] as i16 <= self.v[addr]) {

      let mut pj = self.pa[j-self.s] as usize; // Index of first block in segment
      let mut pn = (j+1-self.s) * self.words_per_seg; // Start of next segment
      if pn > heapsize_max { // Bound last segment to total heap length
        pn = heapsize_max;
      }
      let mut mx = if pj < pn {
        // There is a block starting in this segment
        let mut mx = 1i16;
        while pj < pn {
          if mx < self.v[pj] {
            mx = self.v[pj];
          }
          pj = pj + self.v[pj].abs() as usize;
        }
        mx
      } else {
        // There is no block starting in this segment
        0
      };
      self.st[0] = 0; // Sentinel
      while self.st[j] > mx { // Update segment tree
        self.st[j] = mx;
        let sister = match j % 2 {
          1 => self.st[j-1], // j is odd
          _ => self.st[j+1]  // j is even
        };
        if mx < sister {
          mx = sister;
        }
        j /= 2;
      }
    }
  }

  /**
   * Do the housekeeping necessary after a block with control word `self.v[addr]`
   * is freed or merged with a block on its right, or created by splitting
   * (with addr on the right of the split)
   */
  // We get weird bugs with this routine if we don't have optimize(speed).
  // Reasons why are wholly mysterious.
  #[optimize(speed)]
  fn bl_fix_right(&mut self, addr: usize) {
    let mut j = self.bl_seg(addr);
    // If necessary (and possible) double the number of segments
    while j >= 2*self.s && self.s <= SEGS/2 {
      self.bl_double();
      j = self.bl_seg(addr);
    }
    if self.pa[j-self.s] > addr as u16 {
      self.pa[j-self.s] = addr as u16;
    }
    let vp = self.v[addr];
    self.st[0] = vp; // Sentinel
    while self.st[j] < vp { // Go up branch of segment tree
      self.st[j] = vp;
      j /= 2;
    }
  }

  /**
   * Return the predecessor of block p (there will always be one because of
   * the dummy first block).  The address given must be the index of a
   * control word (as will be the index returned)
   */
  #[optimize(speed)]
  fn bl_pred(&self, addr: usize) -> usize {
    let mut j = self.bl_seg(addr);
    if self.pa[j-self.s] == addr as u16 {
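      // addr is the first block starting in its segment, so its predecessor
      // starts in an earlier segment: climb the segment tree until there is
      // a non-empty segment to our left, then descend back down to it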
      while self.st[j-1] == 0 {
        j /= 2;
      }
      j -= 1;
      while j<self.s {
        if self.st[2*j+1] > 0 {
          j = 2*j+1;
        } else {
          j *= 2;
        }
      }
    }
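    // Walk forward through that segment's blocks; the last block visited
    // before reaching addr is the predecessor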
    let mut qn = self.pa[j-self.s];
    let mut q  = qn;
    while qn != addr as u16 {
      q = qn;
      qn = qn+self.v[qn as usize].abs() as u16;
    }
    q as usize
  }

  /**
   * Return the index to a block of at least `size` words, or 0 if there
   * is not enough free space.
   */
  #[optimize(speed)]
  fn bl_new(&mut self, size: usize) -> usize {
    if self.st[1] <= size as i16 { // Not enough free space
      0
    } else {
      let n = size +1; // Length including control word
      let mut j = 1;
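      // Walk down the segment tree, taking the left child whenever it still
      // holds a block of at least n words, so we end up at the leftmost
      // (first-fit) segment with room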
      while j < self.s {
        if self.st[2*j] >= n as i16 {
          j *= 2;
        } else {
          j = 2*j+1;
        }
      }
      let mut p = self.pa[j-self.s] as usize; // Index of control word of first block in segment j
      while self.v[p] < n as i16 {
        p = p + self.v[p].abs() as usize;
      }
      // p is now the index of control word of required block
      let vp = self.v[p];
      self.v[p] = -(n as i16); // Flag block as allocated
      let fix_left = vp == self.st[j];
      if vp > n as i16 {
        self.v[p+n] = vp - (n as i16);
        if self.bl_seg(p+n) > j {
          self.bl_fix_right(p+n);
        }
      }
      if fix_left {
        self.bl_fix_left(p);
      }
      p+1 // Return index of first word after control word
    }
  }

  /**
   * Dispose of a block obtained with `bl_new(size)`.  The given address is
   * an index that was returned by `bl_new`.
   */
  #[optimize(speed)]
  fn bl_dispose(&mut self, addr: usize) {
    let heapsize_max = self.v.len() - 1;
    let p = addr-1; // Index of control word
    let vp = -self.v[p];
    if vp > 0 {
      self.v[p] = vp; // Mark the block as free
      let j = self.bl_seg(p);
      let pn = p + vp as usize;
      if pn < heapsize_max && self.v[pn] >= 0 {
        // Next block is empty, so we can merge
        self.v[p] = vp + self.v[pn];
        let jn = self.bl_seg(pn);
        if jn > j {
          self.pa[jn-self.s] = (p as i16 + self.v[p]) as u16;
          self.bl_fix_left(pn);
        }
      }
      let pr = self.bl_pred(p); // Preceding block
      if self.v[pr] >= 0 {
        // Preceding block is empty, so we can merge
        self.v[pr] += self.v[p];
        if self.pa[j-self.s] as usize == p {
          self.pa[j-self.s] = (pr as i16 + self.v[pr]) as u16; // Point to first block in segment
          self.bl_fix_left(p);
        }
        self.bl_fix_right(pr);
      } else if self.v[p] > self.st[j] {
        self.bl_fix_right(p);
      }
    } else {
      // Double-Dispose is an error
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    }
  }

  /**
   * Returns the size, in words, of a block allocated by a call to `bl_new()`.
   */
  #[allow(dead_code)]
  fn bl_size(&self, addr: usize) -> usize {
    let p = self.v[addr-1];

    if p > 0 {
      avr_oxide::oserror::halt(avr_oxide::oserror::OsError::InternalError);
    } else {
      -(p+1) as usize
    }
  }

  /**
   * Return the largest allocation, in words, that `bl_new(size)` can
   * currently be expected to satisfy.  This is approximately the same thing
   * as 'available space', although given the effect of fragmentation the
   * total number of unallocated words may be larger than this.
   */
  #[allow(dead_code)]
  fn bl_available(&self) -> usize {
    self.st[1] as usize - 1
  }

  /**
   * Return the number of segments used
   */
  #[allow(dead_code)]
  fn bl_segments(&self) -> usize {
    self.s
  }
}

// Tests =====================================================================
#[cfg(test)]
mod test {
  use std::vec::Vec;
  use avr_oxide::alloc::*;

  #[test]
  fn test_create_heap() {
    let mut memory = [0u8;1024];
    let mut heap = FirstFitHeap::<32,64>::new();
    heap.set_heap_location(&mut memory as *mut u8, 1024);
    heap.init();
    assert_eq!(heap.bl_seg(1), 1);
  }

  #[test]
  fn test_alloc_free() {
    let mut memory = [0u8;1024];
    let mut heap = FirstFitHeap::<32,64>::new();
    heap.set_heap_location(&mut memory as *mut u8, 1024);
    heap.init();

    let init_free = heap.bl_available();
    println!("Heap with {} words available\n", init_free);

    let alloc1 = heap.bl_new(128);
    println!("First allocation, {} bytes @ {}", heap.bl_size(alloc1), alloc1);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc1) >= 128);
    assert_eq!(alloc1, 2);

    let alloc2 = heap.bl_new(64);
    println!("Second allocation, {} bytes @ {}", heap.bl_size(alloc2), alloc2);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc2) >= 64);
    assert_eq!(alloc2, 131);

    let alloc3 = heap.bl_new(27);
    println!("Third allocation, {} bytes @ {}", heap.bl_size(alloc3), alloc3);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc3) >= 27);
    assert_eq!(alloc3, 196);

    let alloc4 = heap.bl_new(256);
    println!("Fourth allocation, {} bytes @ {}", heap.bl_size(alloc4), alloc4);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc4) >= 256);
    assert_eq!(alloc4, 224);

    let alloc5 = heap.bl_new(31);
    println!("Fifth allocation, {} bytes @ {}", heap.bl_size(alloc5), alloc5);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc5) >= 31);
    assert_eq!(alloc5, 481);

    // OK, we allocated a load, now let's free some
    println!("Freeing allocation @ {}", alloc3);
    heap.bl_dispose(alloc3);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert_eq!(heap.bl_available(), 27);

    // Now let's allocate a couple more small chunks
    let alloc6 = heap.bl_new(4);
    println!("Sixth allocation, {} bytes @ {}", heap.bl_size(alloc6), alloc6);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc6) >= 4);
    assert_eq!(alloc6, 196);

    let alloc7 = heap.bl_new(10);
    println!("Seventh allocation, {} bytes @ {}", heap.bl_size(alloc7), alloc7);
    println!("Free space: {}", heap.bl_available());
    println!("Segments: {}", heap.bl_segments());
    assert!(heap.bl_size(alloc7) >= 10);
    assert_eq!(alloc7, 201);

    // If we got this far, let's free everything and then see how much space
    // we have left
    heap.bl_dispose(alloc1);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc2);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc4);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc5);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc6);
    println!("Free space: {}", heap.bl_available());
    heap.bl_dispose(alloc7);
    println!("Free space: {}", heap.bl_available());

    assert_eq!(heap.bl_available(), init_free);
  }

  fn fillspace(addr: *mut u8, size: usize, value: u8) {
    unsafe {
      for i in 0..size as isize {
        *(addr.offset(i)) = value;
      }
    }
  }

  fn checkspace(addr: *mut u8, size: usize, value: u8) -> bool {
    unsafe {
      for i in 0..size as isize {
        if *(addr.offset(i)) != value {
          return false;
        }
      }
    }
    true
  }

  #[test]
  pub fn test_global_alloc() {
    unsafe {
      let mut memory = [0u8;1024];
      let global = GlobalAllocator {
        heap: UnsafeCell::new(FirstFitHeap::<32,64>::new())
      };
      let heap = global.heap.get().as_mut().unwrap();
      heap.set_heap_location(&mut memory as *mut u8, 1024);
      heap.init();


      // Do an allocation
      let addr = global.alloc(Layout::from_size_align(16,2).unwrap());
      fillspace(addr, 16,0xde);
      println!("Allocated a block @ address: {:?}", addr);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr as usize % 2, 0); // Check the returned pointer has correct alignment

      // Now free it
      global.dealloc(addr, Layout::from_size_align(16,2).unwrap());
      println!("Freed block");
      println!("Free space: {}b", heap.bl_available()*2);

      // Do some other alignments
      println!("Alignment checks");
      // Allocate a small odd-sized block first so the following aligned
      // allocations don't just happen to start on their alignment
      let _discard = global.alloc(Layout::from_size_align(3, 2).unwrap());
      let addr1 = global.alloc(Layout::from_size_align(175,4).unwrap());
      fillspace(addr1, 175, 0xde);
      println!("Allocated a block @ address: {:?}", addr1);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr1 as usize % 4, 0); // Check the returned pointer has correct alignment

      let addr2 = global.alloc(Layout::from_size_align(230, 8).unwrap());
      fillspace(addr2, 230, 0xde);
      println!("Allocated a block @ address: {:?}", addr2);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr2 as usize % 8, 0); // Check the returned pointer has correct alignment

      let addr3 = global.alloc(Layout::from_size_align(49, 16).unwrap());
      fillspace(addr3, 49, 0xde);
      println!("Allocated a block @ address: {:?}", addr3);
      println!("Free space: {}b", heap.bl_available()*2);
      assert_eq!(addr3 as usize % 16, 0); // Check the returned pointer has correct alignment

      println!("Checking for data corruption");
      assert!(checkspace(addr1, 175,0xde));
      assert!(checkspace(addr2, 230,0xde));
      assert!(checkspace(addr3, 49,0xde));

      println!("Freeing aligned allocations");
      global.dealloc(addr1, Layout::from_size_align(175,4).unwrap());
      assert!(checkspace(addr2, 230,0xde));
      assert!(checkspace(addr3, 49,0xde));
      global.dealloc(addr2, Layout::from_size_align(230, 8).unwrap());
      assert!(checkspace(addr3, 49,0xde));
      global.dealloc(addr3, Layout::from_size_align(49, 16).unwrap());
    }
  }

  #[test]
  pub fn soaktest_global_alloc(){
    // Remember: the allocator works in 16-bit words, not 8-bit bytes, so
    // the heap sizes here are (device heap size in `deviceconsts.rs`) / 2
    // ATmega328P small
    soaktest_global_alloc_sized::<64,8,{8*2}>();  // '328p Small
    soaktest_global_alloc_sized::<128,8,{8*2}>(); // '328p Medium
    soaktest_global_alloc_sized::<256,16,{16*2}>(); // '328p Large

    soaktest_global_alloc_sized::<256,32,{32*2}>();  // '4809 Small
    soaktest_global_alloc_sized::<512,64,{64*2}>(); // '4809 Medium
    soaktest_global_alloc_sized::<1536,128,{128*2}>(); // '4809 Large
  }

  pub fn soaktest_global_alloc_sized<const SIZE:usize,const SMAX:usize,const S2:usize>() {
    println!("");
    println!("==========================");
    println!("==  ALLOCATOR SOAKTEST  ==");
    println!("==========================");
    println!("  Heap:     {} words", SIZE);
    println!("  Segments: {} (max)", SMAX);

    let mut memory = [0u16; SIZE];


    unsafe {
      let global = GlobalAllocator {
        heap: UnsafeCell::new(FirstFitHeap::<SMAX,S2>::new())
      };

      let heap = global.heap.get().as_mut().unwrap();
      heap.set_heap_location(&mut memory as *mut u16 as *mut u8, {SIZE*2});
      heap.init();

      for blocksize in 1..SMAX {
        println!("Testing blocks of size {}", blocksize);

        let mut blocks : Vec<*mut u8> = Vec::new();

        let mut i:i32 = 0;
        loop {
          let block = global.alloc(Layout::from_size_align(blocksize,1).unwrap());

          if !block.is_null() {
            fillspace(block,blocksize,i as u8);
            blocks.push(block);
          } else {
            break;
          }
          i += 1;
        }

        println!("  Allocated {} blocks", blocks.len());
        let mut i:i32 = 0;
        for block in blocks {
          assert!(checkspace(block,blocksize,i as u8));
          global.dealloc(block,Layout::from_size_align(blocksize,1).unwrap());
          i += 1;
        }
      }
    }
  }

}