From b796e3d848b30346c11d173f39c06df88c646e2f Mon Sep 17 00:00:00 2001
From: Damien George <damien.p.george@gmail.com>
Date: Thu, 28 Aug 2014 10:18:40 +0100
Subject: [PATCH] py: Reduce fragmentation of GC heap.

A recent speed-up of GC allocation left the GC heap fragmented.  This
patch restores the original fragmentation behaviour whilst still
retaining relatively fast allocation.  It works because single-block
allocations happen regularly, and each one advances
gc_last_free_atb_index past the fully used start of the heap, so the
whole heap doesn't need scanning.

Should address issue #836.
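
For illustration, a minimal standalone sketch of the rule this patch
introduces, under the simplifying assumption of one block per table
entry (the real ATB packs four blocks per byte); the names toy_alloc,
block_free and last_free_idx are hypothetical and not from py/gc.c.
The scan-start hint is only advanced when a single block was requested,
because only then is it guaranteed that no free blocks lie before the
one just found.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_BLOCKS 64

    static bool block_free[NUM_BLOCKS];
    static size_t last_free_idx;  /* role of gc_last_free_atb_index */

    /* Find n_blocks contiguous free blocks, scanning from last_free_idx.
     * Returns the start block, or -1 if no run is found. */
    static int toy_alloc(size_t n_blocks) {
        size_t n_free = 0;
        for (size_t i = last_free_idx; i < NUM_BLOCKS; i++) {
            if (block_free[i]) {
                if (++n_free >= n_blocks) {
                    size_t start = i - n_free + 1;
                    for (size_t j = start; j <= i; j++) {
                        block_free[j] = false;
                    }
                    /* Only advance the hint for single-block requests:
                     * in that case every block scanned before i was in
                     * use, so later scans can safely start at i + 1. */
                    if (n_blocks == 1) {
                        last_free_idx = i + 1;
                    }
                    return (int)start;
                }
            } else {
                n_free = 0;
            }
        }
        return -1;
    }

    int main(void) {
        for (size_t i = 0; i < NUM_BLOCKS; i++) {
            block_free[i] = true;
        }
        printf("3-block alloc at %d\n", toy_alloc(3));  /* hint stays at 0 */
        printf("1-block alloc at %d\n", toy_alloc(1));  /* hint moves to 4 */
        printf("last_free_idx = %zu\n", last_free_idx);
        return 0;
    }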
---
 py/gc.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/py/gc.c b/py/gc.c
index a8463fe9b..53bcb069a 100644
--- a/py/gc.c
+++ b/py/gc.c
@@ -386,13 +386,6 @@ void *gc_alloc(mp_uint_t n_bytes, bool has_finaliser) {
             if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
             if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
         }
-        for (i = 0; i < gc_last_free_atb_index; i++) {
-            byte a = gc_alloc_table_start[i];
-            if (ATB_0_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 0; goto found; } } else { n_free = 0; }
-            if (ATB_1_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 1; goto found; } } else { n_free = 0; }
-            if (ATB_2_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 2; goto found; } } else { n_free = 0; }
-            if (ATB_3_IS_FREE(a)) { if (++n_free >= n_blocks) { i = i * BLOCKS_PER_ATB + 3; goto found; } } else { n_free = 0; }
-        }
 
         // nothing found!
         if (collected) {
@@ -409,8 +402,13 @@ found:
     end_block = i;
     start_block = i - n_free + 1;
 
-    // set last free ATB index to last block we found, for start of next scan
-    gc_last_free_atb_index = i / BLOCKS_PER_ATB;
+    // Set last free ATB index to block after last block we found, for start of
+    // next scan.  To reduce fragmentation, we only do this if we were looking
+    // for a single free block, which guarantees that there are no free blocks
+    // before this one.
+    if (n_free == 1) {
+        gc_last_free_atb_index = (i + 1) / BLOCKS_PER_ATB;
+    }
 
     // mark first block as used head
     ATB_FREE_TO_HEAD(start_block);
-- 
GitLab