From 0b610de017f01c40a055f79db3837f51f903b4d6 Mon Sep 17 00:00:00 2001
From: Damien George <damien.p.george@gmail.com>
Date: Mon, 29 Sep 2014 16:25:04 +0100
Subject: [PATCH] py: Make macro names in assemblers consistent, and tidy up a
 bit.

---
 py/asmarm.c          |  55 +++++++++--------
 py/asmarm.h          |  68 ++++++++++----------
 py/asmthumb.c        |  43 +++++--------
 py/asmthumb.h        |  64 +++++++++----------
 py/asmx64.c          | 108 ++++++++++++--------------------
 py/asmx64.h          |  36 +++++------
 py/asmx86.c          |  69 +++++++++++----------
 py/asmx86.h          |  22 +++----
 py/emitinlinethumb.c |  28 ++++-----
 py/emitnative.c      | 144 +++++++++++++++++++++----------------------
 10 files changed, 302 insertions(+), 335 deletions(-)

diff --git a/py/asmarm.c b/py/asmarm.c
index e1d4d84d9..0d5568692 100644
--- a/py/asmarm.c
+++ b/py/asmarm.c
@@ -45,9 +45,8 @@ struct _asm_arm_t {
     byte *code_base;
     byte dummy_data[4];
 
-    uint max_num_labels;
-    int *label_offsets;
-    int num_locals;
+    mp_uint_t max_num_labels;
+    mp_uint_t *label_offsets;
     uint push_reglist;
     uint stack_adjust;
 };
@@ -57,7 +56,7 @@ asm_arm_t *asm_arm_new(uint max_num_labels) {
 
     as = m_new0(asm_arm_t, 1);
     as->max_num_labels = max_num_labels;
-    as->label_offsets = m_new(int, max_num_labels);
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
 
     return as;
 }
@@ -66,7 +65,7 @@ void asm_arm_free(asm_arm_t *as, bool free_code) {
     if (free_code) {
         MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
     }
-
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
     m_del_obj(asm_arm_t, as);
 }
 
@@ -74,7 +73,7 @@ void asm_arm_start_pass(asm_arm_t *as, uint pass) {
     as->pass = pass;
     as->code_offset = 0;
     if (pass == ASM_ARM_PASS_COMPUTE) {
-        memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
     }
 }
 
@@ -127,7 +126,7 @@ STATIC void emit(asm_arm_t *as, uint op) {
 
 // Insert word into instruction flow, add "ALWAYS" condition code
 STATIC void emit_al(asm_arm_t *as, uint op) {
-    emit(as, op | ARM_CC_AL);
+    emit(as, op | ASM_ARM_CC_AL);
 }
 
 // Basic instructions without condition code
@@ -178,7 +177,7 @@ void asm_arm_bkpt(asm_arm_t *as) {
 
 // locals:
 //  - stored on the stack in ascending order
-//  - numbered 0 through as->num_locals-1
+//  - numbered 0 through num_locals-1
 //  - SP points to first local
 //
 //  | SP
@@ -194,30 +193,36 @@ void asm_arm_entry(asm_arm_t *as, int num_locals) {
     }
 
     as->stack_adjust = 0;
-    as->num_locals = num_locals;
-    as->push_reglist = 1 << REG_R1 | 1 << REG_R2 | 1 << REG_R3 | 1 << REG_R4
-            | 1 << REG_R5 | 1 << REG_R6 | 1 << REG_R7 | 1 << REG_R8;
+    as->push_reglist = 1 << ASM_ARM_REG_R1
+        | 1 << ASM_ARM_REG_R2
+        | 1 << ASM_ARM_REG_R3
+        | 1 << ASM_ARM_REG_R4
+        | 1 << ASM_ARM_REG_R5
+        | 1 << ASM_ARM_REG_R6
+        | 1 << ASM_ARM_REG_R7
+        | 1 << ASM_ARM_REG_R8;
 
     // Only adjust the stack if there are more locals than usable registers
     if(num_locals > 3) {
         as->stack_adjust = num_locals * 4;
         // Align stack to 8 bytes
-        if(as->num_locals & 1)
+        if (num_locals & 1) {
             as->stack_adjust += 4;
+        }
     }
 
-    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << REG_LR));
+    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
     if (as->stack_adjust > 0) {
-        emit_al(as, asm_arm_op_sub_imm(REG_SP, REG_SP, as->stack_adjust));
+        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
     }
 }
 
 void asm_arm_exit(asm_arm_t *as) {
     if (as->stack_adjust > 0) {
-        emit_al(as, asm_arm_op_add_imm(REG_SP, REG_SP, as->stack_adjust));
+        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
     }
 
-    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << REG_PC)));
+    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
 }
 
 void asm_arm_label_assign(asm_arm_t *as, uint label) {
@@ -289,8 +294,8 @@ void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
 
 void asm_arm_less_op(asm_arm_t *as, uint rd, uint rn, uint rm) {
     asm_arm_cmp_reg_reg(as, rn, rm); // cmp rn, rm
-    emit(as, asm_arm_op_mov_imm(rd, 1) | ARM_CC_LT); // movlt rd, #1
-    emit(as, asm_arm_op_mov_imm(rd, 0) | ARM_CC_GE); // movge rd, #0
+    emit(as, asm_arm_op_mov_imm(rd, 1) | ASM_ARM_CC_LT); // movlt rd, #1
+    emit(as, asm_arm_op_mov_imm(rd, 0) | ASM_ARM_CC_GE); // movge rd, #0
 }
 
 void asm_arm_add_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
@@ -300,13 +305,13 @@ void asm_arm_add_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
 
 void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
     // add rd, sp, #local_num*4
-    emit_al(as, asm_arm_op_add_imm(rd, REG_SP, local_num << 2));
+    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
 }
 
 void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
     assert(label < as->max_num_labels);
-    int dest = as->label_offsets[label];
-    int rel = dest - as->code_offset;
+    mp_uint_t dest = as->label_offsets[label];
+    mp_int_t rel = dest - as->code_offset;
     rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
     rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
 
@@ -318,21 +323,21 @@ void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
 }
 
 void asm_arm_b_label(asm_arm_t *as, uint label) {
-    asm_arm_bcc_label(as, ARM_CC_AL, label);
+    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
 }
 
 void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
     // If the table offset fits into the ldr instruction
     if(fun_id < (0x1000 / 4)) {
-        emit_al(as, asm_arm_op_mov_reg(REG_LR, REG_PC)); // mov lr, pc
+        emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
         emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
         return;
     }
     
     emit_al(as, 0x59f0004 | (reg_temp << 12)); // ldr rd, [pc, #4]
     // Set lr after fun_ptr
-    emit_al(as, asm_arm_op_add_imm(REG_LR, REG_PC, 4)); // add lr, pc, #4
-    emit_al(as, asm_arm_op_mov_reg(REG_PC, reg_temp)); // mov pc, reg_temp
+    emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_LR, ASM_ARM_REG_PC, 4)); // add lr, pc, #4
+    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_PC, reg_temp)); // mov pc, reg_temp
     emit(as, (uint) fun_ptr);
 }
 
diff --git a/py/asmarm.h b/py/asmarm.h
index 7942793a2..e0c8efe1f 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -28,41 +28,41 @@
 #define ASM_ARM_PASS_COMPUTE (1)
 #define ASM_ARM_PASS_EMIT    (2)
 
-#define REG_R0  (0)
-#define REG_R1  (1)
-#define REG_R2  (2)
-#define REG_R3  (3)
-#define REG_R4  (4)
-#define REG_R5  (5)
-#define REG_R6  (6)
-#define REG_R7  (7)
-#define REG_R8  (8)
-#define REG_R9  (9)
-#define REG_R10 (10)
-#define REG_R11 (11)
-#define REG_R12 (12)
-#define REG_R13 (13)
-#define REG_R14 (14)
-#define REG_R15 (15)
-#define REG_SP  (REG_R13)
-#define REG_LR  (REG_R14)
-#define REG_PC  (REG_R15)
+#define ASM_ARM_REG_R0  (0)
+#define ASM_ARM_REG_R1  (1)
+#define ASM_ARM_REG_R2  (2)
+#define ASM_ARM_REG_R3  (3)
+#define ASM_ARM_REG_R4  (4)
+#define ASM_ARM_REG_R5  (5)
+#define ASM_ARM_REG_R6  (6)
+#define ASM_ARM_REG_R7  (7)
+#define ASM_ARM_REG_R8  (8)
+#define ASM_ARM_REG_R9  (9)
+#define ASM_ARM_REG_R10 (10)
+#define ASM_ARM_REG_R11 (11)
+#define ASM_ARM_REG_R12 (12)
+#define ASM_ARM_REG_R13 (13)
+#define ASM_ARM_REG_R14 (14)
+#define ASM_ARM_REG_R15 (15)
+#define ASM_ARM_REG_SP  (ASM_ARM_REG_R13)
+#define ASM_ARM_REG_LR  (ASM_ARM_REG_R14)
+#define ASM_ARM_REG_PC  (ASM_ARM_REG_R15)
 
-#define ARM_CC_EQ (0x0 << 28)
-#define ARM_CC_NE (0x1 << 28)
-#define ARM_CC_CS (0x2 << 28)
-#define ARM_CC_CC (0x3 << 28)
-#define ARM_CC_MI (0x4 << 28)
-#define ARM_CC_PL (0x5 << 28)
-#define ARM_CC_VS (0x6 << 28)
-#define ARM_CC_VC (0x7 << 28)
-#define ARM_CC_HI (0x8 << 28)
-#define ARM_CC_LS (0x9 << 28)
-#define ARM_CC_GE (0xa << 28)
-#define ARM_CC_LT (0xb << 28)
-#define ARM_CC_GT (0xc << 28)
-#define ARM_CC_LE (0xd << 28)
-#define ARM_CC_AL (0xe << 28)
+#define ASM_ARM_CC_EQ (0x0 << 28)
+#define ASM_ARM_CC_NE (0x1 << 28)
+#define ASM_ARM_CC_CS (0x2 << 28)
+#define ASM_ARM_CC_CC (0x3 << 28)
+#define ASM_ARM_CC_MI (0x4 << 28)
+#define ASM_ARM_CC_PL (0x5 << 28)
+#define ASM_ARM_CC_VS (0x6 << 28)
+#define ASM_ARM_CC_VC (0x7 << 28)
+#define ASM_ARM_CC_HI (0x8 << 28)
+#define ASM_ARM_CC_LS (0x9 << 28)
+#define ASM_ARM_CC_GE (0xa << 28)
+#define ASM_ARM_CC_LT (0xb << 28)
+#define ASM_ARM_CC_GT (0xc << 28)
+#define ASM_ARM_CC_LE (0xd << 28)
+#define ASM_ARM_CC_AL (0xe << 28)
 
 typedef struct _asm_arm_t asm_arm_t;
 
diff --git a/py/asmthumb.c b/py/asmthumb.c
index 1e9008839..7ae6862b5 100644
--- a/py/asmthumb.c
+++ b/py/asmthumb.c
@@ -68,18 +68,7 @@ void asm_thumb_free(asm_thumb_t *as, bool free_code) {
     if (free_code) {
         MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
     }
-    /*
-    if (as->label != NULL) {
-        int i;
-        for (i = 0; i < as->label->len; ++i)
-        {
-            Label *lab = &g_array_index(as->label, Label, i);
-            if (lab->unresolved != NULL)
-                g_array_free(lab->unresolved, true);
-        }
-        g_array_free(as->label, true);
-    }
-    */
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
     m_del_obj(asm_thumb_t, as);
 }
 
@@ -284,23 +273,23 @@ void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) {
 #define OP_FORMAT_2(op, rlo_dest, rlo_src, src_b) ((op) | ((src_b) << 6) | ((rlo_src) << 3) | (rlo_dest))
 
 void asm_thumb_format_2(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, int src_b) {
-    assert(rlo_dest < REG_R8);
-    assert(rlo_src < REG_R8);
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
     asm_thumb_op16(as, OP_FORMAT_2(op, rlo_dest, rlo_src, src_b));
 }
 
 #define OP_FORMAT_3(op, rlo, i8) ((op) | ((rlo) << 8) | (i8))
 
 void asm_thumb_format_3(asm_thumb_t *as, uint op, uint rlo, int i8) {
-    assert(rlo < REG_R8);
+    assert(rlo < ASM_THUMB_REG_R8);
     asm_thumb_op16(as, OP_FORMAT_3(op, rlo, i8));
 }
 
 #define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))
 
 void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
-    assert(rlo_dest < REG_R8);
-    assert(rlo_src < REG_R8);
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
     asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
 }
 
@@ -331,7 +320,7 @@ void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
 
 // if loading lo half with movw, the i16 value will be zero extended into the r32 register!
 STATIC void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
-    assert(reg_dest < REG_R15);
+    assert(reg_dest < ASM_THUMB_REG_R15);
     // mov[wt] reg_dest, #i16_src
     asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
 }
@@ -409,14 +398,14 @@ void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
 #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 
 void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
-    assert(rlo_src < REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
     int word_offset = local_num;
     assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
 }
 
 void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
-    assert(rlo_dest < REG_R8);
+    assert(rlo_dest < ASM_THUMB_REG_R8);
     int word_offset = local_num;
     assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
@@ -425,7 +414,7 @@ void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
 #define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
 
 void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
-    assert(rlo_dest < REG_R8);
+    assert(rlo_dest < ASM_THUMB_REG_R8);
     int word_offset = local_num;
     assert(as->pass < ASM_THUMB_PASS_EMIT || word_offset >= 0);
     asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
@@ -439,7 +428,7 @@ void asm_thumb_b_label(asm_thumb_t *as, uint label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
-    if (dest >= 0 && rel <= -4) {
+    if (dest != -1 && rel <= -4) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 12 bit relative jump
         if (SIGNED_FIT12(rel)) {
@@ -462,7 +451,7 @@ void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
     mp_uint_t dest = get_label_dest(as, label);
     mp_int_t rel = dest - as->code_offset;
     rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
-    if (dest >= 0 && rel <= -4) {
+    if (dest != -1 && rel <= -4) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 9 bit relative jump
         if (SIGNED_FIT9(rel)) {
@@ -482,17 +471,17 @@ void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
 
 void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
     /* TODO make this use less bytes
-    uint rlo_base = REG_R3;
-    uint rlo_dest = REG_R7;
+    uint rlo_base = ASM_THUMB_REG_R3;
+    uint rlo_dest = ASM_THUMB_REG_R7;
     uint word_offset = 4;
     asm_thumb_op16(as, 0x0000);
     asm_thumb_op16(as, 0x6800 | (word_offset << 6) | (rlo_base << 3) | rlo_dest); // ldr rlo_dest, [rlo_base, #offset]
-    asm_thumb_op16(as, 0x4780 | (REG_R9 << 3)); // blx reg
+    asm_thumb_op16(as, 0x4780 | (ASM_THUMB_REG_R9 << 3)); // blx reg
     */
 
     if (fun_id < 32) {
         // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes
-        asm_thumb_op16(as, OP_FORMAT_9_10(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, REG_R7, fun_id));
+        asm_thumb_op16(as, OP_FORMAT_9_10(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, ASM_THUMB_REG_R7, fun_id));
         asm_thumb_op16(as, OP_BLX(reg_temp));
     } else {
         // load ptr to function into register using immediate; 6 bytes
diff --git a/py/asmthumb.h b/py/asmthumb.h
index c17da16e0..beb12502c 100644
--- a/py/asmthumb.h
+++ b/py/asmthumb.h
@@ -27,38 +27,38 @@
 #define ASM_THUMB_PASS_COMPUTE (1)
 #define ASM_THUMB_PASS_EMIT    (2)
 
-#define REG_R0  (0)
-#define REG_R1  (1)
-#define REG_R2  (2)
-#define REG_R3  (3)
-#define REG_R4  (4)
-#define REG_R5  (5)
-#define REG_R6  (6)
-#define REG_R7  (7)
-#define REG_R8  (8)
-#define REG_R9  (9)
-#define REG_R10 (10)
-#define REG_R11 (11)
-#define REG_R12 (12)
-#define REG_R13 (13)
-#define REG_R14 (14)
-#define REG_R15 (15)
-#define REG_LR  (REG_R14)
-
-#define THUMB_CC_EQ (0x0)
-#define THUMB_CC_NE (0x1)
-#define THUMB_CC_CS (0x2)
-#define THUMB_CC_CC (0x3)
-#define THUMB_CC_MI (0x4)
-#define THUMB_CC_PL (0x5)
-#define THUMB_CC_VS (0x6)
-#define THUMB_CC_VC (0x7)
-#define THUMB_CC_HI (0x8)
-#define THUMB_CC_LS (0x9)
-#define THUMB_CC_GE (0xa)
-#define THUMB_CC_LT (0xb)
-#define THUMB_CC_GT (0xc)
-#define THUMB_CC_LE (0xd)
+#define ASM_THUMB_REG_R0  (0)
+#define ASM_THUMB_REG_R1  (1)
+#define ASM_THUMB_REG_R2  (2)
+#define ASM_THUMB_REG_R3  (3)
+#define ASM_THUMB_REG_R4  (4)
+#define ASM_THUMB_REG_R5  (5)
+#define ASM_THUMB_REG_R6  (6)
+#define ASM_THUMB_REG_R7  (7)
+#define ASM_THUMB_REG_R8  (8)
+#define ASM_THUMB_REG_R9  (9)
+#define ASM_THUMB_REG_R10 (10)
+#define ASM_THUMB_REG_R11 (11)
+#define ASM_THUMB_REG_R12 (12)
+#define ASM_THUMB_REG_R13 (13)
+#define ASM_THUMB_REG_R14 (14)
+#define ASM_THUMB_REG_R15 (15)
+#define ASM_THUMB_REG_LR  (ASM_THUMB_REG_R14)
+
+#define ASM_THUMB_CC_EQ (0x0)
+#define ASM_THUMB_CC_NE (0x1)
+#define ASM_THUMB_CC_CS (0x2)
+#define ASM_THUMB_CC_CC (0x3)
+#define ASM_THUMB_CC_MI (0x4)
+#define ASM_THUMB_CC_PL (0x5)
+#define ASM_THUMB_CC_VS (0x6)
+#define ASM_THUMB_CC_VC (0x7)
+#define ASM_THUMB_CC_HI (0x8)
+#define ASM_THUMB_CC_LS (0x9)
+#define ASM_THUMB_CC_GE (0xa)
+#define ASM_THUMB_CC_LT (0xb)
+#define ASM_THUMB_CC_GT (0xc)
+#define ASM_THUMB_CC_LE (0xd)
 
 typedef struct _asm_thumb_t asm_thumb_t;
 
diff --git a/py/asmx64.c b/py/asmx64.c
index f2ad16d15..8d074dc40 100644
--- a/py/asmx64.c
+++ b/py/asmx64.c
@@ -114,17 +114,17 @@ struct _asm_x64_t {
     byte *code_base;
     byte dummy_data[8];
 
-    uint max_num_labels;
-    int *label_offsets;
+    mp_uint_t max_num_labels;
+    mp_uint_t *label_offsets;
     int num_locals;
 };
 
-asm_x64_t *asm_x64_new(uint max_num_labels) {
+asm_x64_t *asm_x64_new(mp_uint_t max_num_labels) {
     asm_x64_t *as;
 
     as = m_new0(asm_x64_t, 1);
     as->max_num_labels = max_num_labels;
-    as->label_offsets = m_new(int, max_num_labels);
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
 
     return as;
 }
@@ -133,18 +133,7 @@ void asm_x64_free(asm_x64_t *as, bool free_code) {
     if (free_code) {
         MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
     }
-    /*
-    if (as->label != NULL) {
-        int i;
-        for (i = 0; i < as->label->len; ++i)
-        {
-            Label* lab = &g_array_index(as->label, Label, i);
-            if (lab->unresolved != NULL)
-                g_array_free(lab->unresolved, true);
-        }
-        g_array_free(as->label, true);
-    }
-    */
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
     m_del_obj(asm_x64_t, as);
 }
 
@@ -153,7 +142,7 @@ void asm_x64_start_pass(asm_x64_t *as, uint pass) {
     as->code_offset = 0;
     if (pass == ASM_X64_PASS_COMPUTE) {
         // reset all labels
-        memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
     }
 }
 
@@ -192,7 +181,7 @@ STATIC byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int num_bytes_to_writ
     }
 }
 
-uint asm_x64_get_code_size(asm_x64_t *as) {
+mp_uint_t asm_x64_get_code_size(asm_x64_t *as) {
     return as->code_size;
 }
 
@@ -252,9 +241,9 @@ STATIC void asm_x64_write_word32_to(asm_x64_t *as, int offset, int w32) {
 
 STATIC void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int disp_offset) {
     assert(disp_r64 < 8);
-    assert(disp_r64 != REG_RSP);
+    assert(disp_r64 != ASM_X64_REG_RSP);
 
-    if (disp_offset == 0 && disp_r64 != REG_RBP) {
+    if (disp_offset == 0 && disp_r64 != ASM_X64_REG_RBP) {
         asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP0 | MODRM_RM_R64(disp_r64));
     } else if (SIGNED_FIT8(disp_offset)) {
         asm_x64_write_byte_2(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), IMM32_L0(disp_offset));
@@ -400,23 +389,6 @@ void asm_x64_add_r64_to_r64(asm_x64_t *as, int src_r64, int dest_r64) {
     asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_ADD_R64_TO_RM64, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
 }
 
-/*
-void asm_x64_add_i32_to_r32(asm_x64_t *as, int src_i32, int dest_r32)
-{
-    assert(dest_r32 != REG_RSP); // in this case i think src_i32 must be 64 bits
-    if (SIGNED_FIT8(src_i32))
-    {
-        asm_x64_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
-        asm_x64_write_byte_1(as, src_i32 & 0xff);
-    }
-    else
-    {
-        asm_x64_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
-        asm_x64_write_word32(as, src_i32);
-    }
-}
-*/
-
 /*
 void asm_x64_sub_r32_from_r32(asm_x64_t *as, int src_r32, int dest_r32) {
     // defaults to 32 bit operation
@@ -495,8 +467,8 @@ void asm_x64_cmp_i32_with_r32(asm_x64_t *as, int src_i32, int src_r32) {
 
 void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b) {
     // TODO implement for other registers
-    assert(src_r64_a == REG_RAX);
-    assert(src_r64_b == REG_RAX);
+    assert(src_r64_a == ASM_X64_REG_RAX);
+    assert(src_r64_b == ASM_X64_REG_RAX);
     asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
 }
 
@@ -513,20 +485,20 @@ void asm_x64_label_assign(asm_x64_t *as, int label) {
         as->label_offsets[label] = as->code_offset;
     } else {
         // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
-        //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
+        //printf("l%d: (at %ld=%ld)\n", label, as->label_offsets[label], as->code_offset);
         assert(as->label_offsets[label] == as->code_offset);
     }
 }
 
-STATIC int get_label_dest(asm_x64_t *as, int label) {
+STATIC mp_uint_t get_label_dest(asm_x64_t *as, int label) {
     assert(label < as->max_num_labels);
     return as->label_offsets[label];
 }
 
 void asm_x64_jmp_label(asm_x64_t *as, int label) {
-    int dest = get_label_dest(as, label);
-    int rel = dest - as->code_offset;
-    if (dest >= 0 && rel < 0) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != -1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;
@@ -546,9 +518,9 @@ void asm_x64_jmp_label(asm_x64_t *as, int label) {
 }
 
 void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, int label) {
-    int dest = get_label_dest(as, label);
-    int rel = dest - as->code_offset;
-    if (dest >= 0 && rel < 0) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != -1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;
@@ -568,23 +540,23 @@ void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, int label) {
 }
 
 void asm_x64_entry(asm_x64_t *as, int num_locals) {
-    asm_x64_push_r64(as, REG_RBP);
-    asm_x64_mov_r64_to_r64(as, REG_RSP, REG_RBP);
+    asm_x64_push_r64(as, ASM_X64_REG_RBP);
+    asm_x64_mov_r64_to_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
     if (num_locals < 0) {
         num_locals = 0;
     }
     num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
-    asm_x64_sub_i32_from_r64(as, num_locals * WORD_SIZE, REG_RSP);
-    asm_x64_push_r64(as, REG_RBX);
-    asm_x64_push_r64(as, REG_R12);
-    asm_x64_push_r64(as, REG_R13);
+    asm_x64_sub_i32_from_r64(as, num_locals * WORD_SIZE, ASM_X64_REG_RSP);
+    asm_x64_push_r64(as, ASM_X64_REG_RBX);
+    asm_x64_push_r64(as, ASM_X64_REG_R12);
+    asm_x64_push_r64(as, ASM_X64_REG_R13);
     as->num_locals = num_locals;
 }
 
 void asm_x64_exit(asm_x64_t *as) {
-    asm_x64_pop_r64(as, REG_R13);
-    asm_x64_pop_r64(as, REG_R12);
-    asm_x64_pop_r64(as, REG_RBX);
+    asm_x64_pop_r64(as, ASM_X64_REG_R13);
+    asm_x64_pop_r64(as, ASM_X64_REG_R12);
+    asm_x64_pop_r64(as, ASM_X64_REG_RBX);
     asm_x64_write_byte_1(as, OPCODE_LEAVE);
     asm_x64_ret(as);
 }
@@ -605,30 +577,30 @@ STATIC int asm_x64_local_offset_from_ebp(asm_x64_t *as, int local_num) {
 }
 
 void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) {
-    asm_x64_mov_disp_to_r64(as, REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64);
+    asm_x64_mov_disp_to_r64(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, src_local_num), dest_r64);
 }
 
 void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) {
-    asm_x64_mov_r64_to_disp(as, src_r64, REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num));
+    asm_x64_mov_r64_to_disp(as, src_r64, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, dest_local_num));
 }
 
 void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) {
     int offset = asm_x64_local_offset_from_ebp(as, local_num);
     if (offset == 0) {
-        asm_x64_mov_r64_to_r64(as, REG_RBP, dest_r64);
+        asm_x64_mov_r64_to_r64(as, ASM_X64_REG_RBP, dest_r64);
     } else {
-        asm_x64_lea_disp_to_r64(as, REG_RBP, offset, dest_r64);
+        asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RBP, offset, dest_r64);
     }
 }
 
 /*
 void asm_x64_push_local(asm_x64_t *as, int local_num) {
-    asm_x64_push_disp(as, REG_RBP, asm_x64_local_offset_from_ebp(as, local_num));
+    asm_x64_push_disp(as, ASM_X64_REG_RBP, asm_x64_local_offset_from_ebp(as, local_num));
 }
 
 void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64)
 {
-    asm_x64_mov_r64_to_r64(as, REG_RBP, temp_r64);
+    asm_x64_mov_r64_to_r64(as, ASM_X64_REG_RBP, temp_r64);
     asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_ebp(as, local_num), temp_r64);
     asm_x64_push_r64(as, temp_r64);
 }
@@ -639,21 +611,21 @@ void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64)
 
 void asm_x64_call(asm_x64_t *as, void* func)
 {
-    asm_x64_sub_i32_from_r32(as, 8, REG_RSP);
+    asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
     asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
     asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
-    asm_x64_mov_r64_to_r64(as, REG_RBP, REG_RSP);
+    asm_x64_mov_r64_to_r64(as, ASM_X64_REG_RBP, ASM_X64_REG_RSP);
 }
 
 void asm_x64_call_i1(asm_x64_t *as, void* func, int i1)
 {
-    asm_x64_sub_i32_from_r32(as, 8, REG_RSP);
-    asm_x64_sub_i32_from_r32(as, 12, REG_RSP);
+    asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+    asm_x64_sub_i32_from_r32(as, 12, ASM_X64_REG_RSP);
     asm_x64_push_i32(as, i1);
     asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
     asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
-    asm_x64_add_i32_to_r32(as, 16, REG_RSP);
-    asm_x64_mov_r64_to_r64(as, REG_RBP, REG_RSP);
+    asm_x64_add_i32_to_r32(as, 16, ASM_X64_REG_RSP);
+    asm_x64_mov_r64_to_r64(as, ASM_X64_REG_RBP, ASM_X64_REG_RSP);
 }
 */
 
diff --git a/py/asmx64.h b/py/asmx64.h
index 440f57367..3b138a753 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -34,22 +34,22 @@
 #define ASM_X64_PASS_COMPUTE (1)
 #define ASM_X64_PASS_EMIT    (2)
 
-#define REG_RAX (0)
-#define REG_RCX (1)
-#define REG_RDX (2)
-#define REG_RBX (3)
-#define REG_RSP (4)
-#define REG_RBP (5)
-#define REG_RSI (6)
-#define REG_RDI (7)
-#define REG_R08 (8)
-#define REG_R09 (9)
-#define REG_R10 (10)
-#define REG_R11 (11)
-#define REG_R12 (12)
-#define REG_R13 (13)
-#define REG_R14 (14)
-#define REG_R15 (15)
+#define ASM_X64_REG_RAX (0)
+#define ASM_X64_REG_RCX (1)
+#define ASM_X64_REG_RDX (2)
+#define ASM_X64_REG_RBX (3)
+#define ASM_X64_REG_RSP (4)
+#define ASM_X64_REG_RBP (5)
+#define ASM_X64_REG_RSI (6)
+#define ASM_X64_REG_RDI (7)
+#define ASM_X64_REG_R08 (8)
+#define ASM_X64_REG_R09 (9)
+#define ASM_X64_REG_R10 (10)
+#define ASM_X64_REG_R11 (11)
+#define ASM_X64_REG_R12 (12)
+#define ASM_X64_REG_R13 (13)
+#define ASM_X64_REG_R14 (14)
+#define ASM_X64_REG_R15 (15)
 
 // condition codes, used for jcc and setcc (despite their j-name!)
 #define ASM_X64_CC_JB  (0x2) // below, unsigned
@@ -62,11 +62,11 @@
 
 typedef struct _asm_x64_t asm_x64_t;
 
-asm_x64_t* asm_x64_new(uint max_num_labels);
+asm_x64_t* asm_x64_new(mp_uint_t max_num_labels);
 void asm_x64_free(asm_x64_t* as, bool free_code);
 void asm_x64_start_pass(asm_x64_t *as, uint pass);
 void asm_x64_end_pass(asm_x64_t *as);
-uint asm_x64_get_code_size(asm_x64_t* as);
+mp_uint_t asm_x64_get_code_size(asm_x64_t* as);
 void* asm_x64_get_code(asm_x64_t* as);
 
 void asm_x64_nop(asm_x64_t* as);
diff --git a/py/asmx86.c b/py/asmx86.c
index ffe3e5e43..08299f851 100644
--- a/py/asmx86.c
+++ b/py/asmx86.c
@@ -102,8 +102,8 @@ struct _asm_x86_t {
     byte *code_base;
     byte dummy_data[8];
 
-    uint max_num_labels;
-    int *label_offsets;
+    mp_uint_t max_num_labels;
+    mp_uint_t *label_offsets;
     int num_locals;
 };
 
@@ -112,7 +112,7 @@ asm_x86_t *asm_x86_new(mp_uint_t max_num_labels) {
 
     as = m_new0(asm_x86_t, 1);
     as->max_num_labels = max_num_labels;
-    as->label_offsets = m_new(int, max_num_labels);
+    as->label_offsets = m_new(mp_uint_t, max_num_labels);
 
     return as;
 }
@@ -121,6 +121,7 @@ void asm_x86_free(asm_x86_t *as, bool free_code) {
     if (free_code) {
         MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
     }
+    m_del(mp_uint_t, as->label_offsets, as->max_num_labels);
     m_del_obj(asm_x86_t, as);
 }
 
@@ -129,7 +130,7 @@ void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass) {
     as->code_offset = 0;
     if (pass == ASM_X86_PASS_COMPUTE) {
         // reset all labels
-        memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(mp_uint_t));
     }
 }
 
@@ -191,9 +192,9 @@ STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) {
 }
 
 STATIC void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) {
-    assert(disp_r32 != REG_ESP);
+    assert(disp_r32 != ASM_X86_REG_ESP);
 
-    if (disp_offset == 0 && disp_r32 != REG_EBP) {
+    if (disp_offset == 0 && disp_r32 != ASM_X86_REG_EBP) {
         asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP0 | MODRM_RM_R32(disp_r32));
     } else if (SIGNED_FIT8(disp_offset)) {
         asm_x86_write_byte_2(as, MODRM_R32(r32) | MODRM_RM_DISP8 | MODRM_RM_R32(disp_r32), IMM32_L0(disp_offset));
@@ -352,8 +353,8 @@ void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) {
 
 void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) {
     // TODO implement for other registers
-    assert(src_r32_a == REG_EAX);
-    assert(src_r32_b == REG_EAX);
+    assert(src_r32_a == ASM_X86_REG_EAX);
+    assert(src_r32_b == ASM_X86_REG_EAX);
     asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
 }
 
@@ -374,15 +375,15 @@ void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) {
     }
 }
 
-STATIC int get_label_dest(asm_x86_t *as, int label) {
+STATIC mp_uint_t get_label_dest(asm_x86_t *as, int label) {
     assert(label < as->max_num_labels);
     return as->label_offsets[label];
 }
 
 void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
-    int dest = get_label_dest(as, label);
-    int rel = dest - as->code_offset;
-    if (dest >= 0 && rel < 0) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != -1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;
@@ -402,9 +403,9 @@ void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
 }
 
 void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
-    int dest = get_label_dest(as, label);
-    int rel = dest - as->code_offset;
-    if (dest >= 0 && rel < 0) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->code_offset;
+    if (dest != -1 && rel < 0) {
         // is a backwards jump, so we know the size of the jump on the first pass
         // calculate rel assuming 8 bit relative jump
         rel -= 2;
@@ -424,39 +425,39 @@ void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
 }
 
 void asm_x86_entry(asm_x86_t *as, mp_uint_t num_locals) {
-    asm_x86_push_r32(as, REG_EBP);
-    asm_x86_mov_r32_to_r32(as, REG_ESP, REG_EBP);
+    asm_x86_push_r32(as, ASM_X86_REG_EBP);
+    asm_x86_mov_r32_to_r32(as, ASM_X86_REG_ESP, ASM_X86_REG_EBP);
     if (num_locals > 0) {
-        asm_x86_sub_i32_from_r32(as, num_locals * WORD_SIZE, REG_ESP);
+        asm_x86_sub_i32_from_r32(as, num_locals * WORD_SIZE, ASM_X86_REG_ESP);
     }
-    asm_x86_push_r32(as, REG_EBX);
-    asm_x86_push_r32(as, REG_ESI);
-    asm_x86_push_r32(as, REG_EDI);
+    asm_x86_push_r32(as, ASM_X86_REG_EBX);
+    asm_x86_push_r32(as, ASM_X86_REG_ESI);
+    asm_x86_push_r32(as, ASM_X86_REG_EDI);
     // TODO align stack on 16-byte boundary
     as->num_locals = num_locals;
 }
 
 void asm_x86_exit(asm_x86_t *as) {
-    asm_x86_pop_r32(as, REG_EDI);
-    asm_x86_pop_r32(as, REG_ESI);
-    asm_x86_pop_r32(as, REG_EBX);
+    asm_x86_pop_r32(as, ASM_X86_REG_EDI);
+    asm_x86_pop_r32(as, ASM_X86_REG_ESI);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBX);
     asm_x86_write_byte_1(as, OPCODE_LEAVE);
     asm_x86_ret(as);
 }
 
 #if 0
 void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
-    asm_x86_push_disp(as, REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE);
+    asm_x86_push_disp(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE);
 }
 #endif
 
 void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
-    asm_x86_mov_disp_to_r32(as, REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32);
+    asm_x86_mov_disp_to_r32(as, ASM_X86_REG_EBP, 2 * WORD_SIZE + src_arg_num * WORD_SIZE, dest_r32);
 }
 
 #if 0
 void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
-    asm_x86_mov_r32_to_disp(as, src_r32, REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE);
+    asm_x86_mov_r32_to_disp(as, src_r32, ASM_X86_REG_EBP, 2 * WORD_SIZE + dest_arg_num * WORD_SIZE);
 }
 #endif
 
@@ -476,30 +477,30 @@ STATIC int asm_x86_local_offset_from_ebp(asm_x86_t *as, int local_num) {
 }
 
 void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
-    asm_x86_mov_disp_to_r32(as, REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
+    asm_x86_mov_disp_to_r32(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
 }
 
 void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
-    asm_x86_mov_r32_to_disp(as, src_r32, REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
+    asm_x86_mov_r32_to_disp(as, src_r32, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
 }
 
 void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
     int offset = asm_x86_local_offset_from_ebp(as, local_num);
     if (offset == 0) {
-        asm_x86_mov_r32_to_r32(as, REG_EBP, dest_r32);
+        asm_x86_mov_r32_to_r32(as, ASM_X86_REG_EBP, dest_r32);
     } else {
-        asm_x86_lea_disp_to_r32(as, REG_EBP, offset, dest_r32);
+        asm_x86_lea_disp_to_r32(as, ASM_X86_REG_EBP, offset, dest_r32);
     }
 }
 
 #if 0
 void asm_x86_push_local(asm_x86_t *as, int local_num) {
-    asm_x86_push_disp(as, REG_EBP, asm_x86_local_offset_from_ebp(as, local_num));
+    asm_x86_push_disp(as, ASM_X86_REG_EBP, asm_x86_local_offset_from_ebp(as, local_num));
 }
 
 void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32)
 {
-    asm_x86_mov_r32_to_r32(as, REG_EBP, temp_r32);
+    asm_x86_mov_r32_to_r32(as, ASM_X86_REG_EBP, temp_r32);
     asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_ebp(as, local_num), temp_r32);
     asm_x86_push_r32(as, temp_r32);
 }
@@ -534,7 +535,7 @@ void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32)
 
     // the caller must clean up the stack
     if (n_args > 0) {
-        asm_x86_add_i32_to_r32(as, WORD_SIZE * n_args, REG_ESP);
+        asm_x86_add_i32_to_r32(as, WORD_SIZE * n_args, ASM_X86_REG_ESP);
     }
 }
 
diff --git a/py/asmx86.h b/py/asmx86.h
index 76df7bf2b..0ee192378 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -35,22 +35,22 @@
 #define ASM_X86_PASS_COMPUTE (1)
 #define ASM_X86_PASS_EMIT    (2)
 
-#define REG_EAX (0)
-#define REG_ECX (1)
-#define REG_EDX (2)
-#define REG_EBX (3)
-#define REG_ESP (4)
-#define REG_EBP (5)
-#define REG_ESI (6)
-#define REG_EDI (7)
+#define ASM_X86_REG_EAX (0)
+#define ASM_X86_REG_ECX (1)
+#define ASM_X86_REG_EDX (2)
+#define ASM_X86_REG_EBX (3)
+#define ASM_X86_REG_ESP (4)
+#define ASM_X86_REG_EBP (5)
+#define ASM_X86_REG_ESI (6)
+#define ASM_X86_REG_EDI (7)
 
 // x86 passes values on the stack, but the emitter is register based, so we need
 // to define registers that can temporarily hold the function arguments.  They
 // need to be defined here so that asm_x86_call_ind can push them onto the stack
 // before the call.
-#define ASM_X86_REG_ARG_1 REG_EAX
-#define ASM_X86_REG_ARG_2 REG_ECX
-#define ASM_X86_REG_ARG_3 REG_EDX
+#define ASM_X86_REG_ARG_1 ASM_X86_REG_EAX
+#define ASM_X86_REG_ARG_2 ASM_X86_REG_ECX
+#define ASM_X86_REG_ARG_3 ASM_X86_REG_EDX
 
 // condition codes, used for jcc and setcc (despite their j-name!)
 #define ASM_X86_CC_JB  (0x2) // below, unsigned
diff --git a/py/emitinlinethumb.c b/py/emitinlinethumb.c
index 57a262f8e..09342f1f9 100644
--- a/py/emitinlinethumb.c
+++ b/py/emitinlinethumb.c
@@ -238,20 +238,20 @@ STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_
 
 typedef struct _cc_name_t { byte cc; byte name[2]; } cc_name_t;
 STATIC const cc_name_t cc_name_table[] = {
-    {THUMB_CC_EQ, "eq"},
-    {THUMB_CC_NE, "ne"},
-    {THUMB_CC_CS, "cs"},
-    {THUMB_CC_CC, "cc"},
-    {THUMB_CC_MI, "mi"},
-    {THUMB_CC_PL, "pl"},
-    {THUMB_CC_VS, "vs"},
-    {THUMB_CC_VC, "vc"},
-    {THUMB_CC_HI, "hi"},
-    {THUMB_CC_LS, "ls"},
-    {THUMB_CC_GE, "ge"},
-    {THUMB_CC_LT, "lt"},
-    {THUMB_CC_GT, "gt"},
-    {THUMB_CC_LE, "le"},
+    { ASM_THUMB_CC_EQ, "eq" },
+    { ASM_THUMB_CC_NE, "ne" },
+    { ASM_THUMB_CC_CS, "cs" },
+    { ASM_THUMB_CC_CC, "cc" },
+    { ASM_THUMB_CC_MI, "mi" },
+    { ASM_THUMB_CC_PL, "pl" },
+    { ASM_THUMB_CC_VS, "vs" },
+    { ASM_THUMB_CC_VC, "vc" },
+    { ASM_THUMB_CC_HI, "hi" },
+    { ASM_THUMB_CC_LS, "ls" },
+    { ASM_THUMB_CC_GE, "ge" },
+    { ASM_THUMB_CC_LT, "lt" },
+    { ASM_THUMB_CC_GT, "gt" },
+    { ASM_THUMB_CC_LE, "le" },
 };
 
 STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
diff --git a/py/emitnative.c b/py/emitnative.c
index d45b05997..c2aa7a76b 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -82,21 +82,21 @@
 
 #define EXPORT_FUN(name) emit_native_x64_##name
 
-#define REG_RET REG_RAX
-#define REG_ARG_1 REG_RDI
-#define REG_ARG_2 REG_RSI
-#define REG_ARG_3 REG_RDX
-#define REG_ARG_4 REG_RCX
+#define REG_RET ASM_X64_REG_RAX
+#define REG_ARG_1 ASM_X64_REG_RDI
+#define REG_ARG_2 ASM_X64_REG_RSI
+#define REG_ARG_3 ASM_X64_REG_RDX
+#define REG_ARG_4 ASM_X64_REG_RCX
 
 // caller-save
-#define REG_TEMP0 REG_RAX
-#define REG_TEMP1 REG_RDI
-#define REG_TEMP2 REG_RSI
+#define REG_TEMP0 ASM_X64_REG_RAX
+#define REG_TEMP1 ASM_X64_REG_RDI
+#define REG_TEMP2 ASM_X64_REG_RSI
 
 // callee-save
-#define REG_LOCAL_1 REG_RBX
-#define REG_LOCAL_2 REG_R12
-#define REG_LOCAL_3 REG_R13
+#define REG_LOCAL_1 ASM_X64_REG_RBX
+#define REG_LOCAL_2 ASM_X64_REG_R12
+#define REG_LOCAL_3 ASM_X64_REG_R13
 #define REG_LOCAL_NUM (3)
 
 #define ASM_PASS_COMPUTE    ASM_X64_PASS_COMPUTE
@@ -129,7 +129,7 @@
         asm_x64_cmp_r64_with_r64(as, reg1, reg2); \
         asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \
     } while (0)
-#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, REG_RAX)
+#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, ASM_X64_REG_RAX)
 
 #define ASM_MOV_REG_TO_LOCAL        asm_x64_mov_r64_to_local
 #define ASM_MOV_IMM_TO_REG          asm_x64_mov_i64_to_r64_optimised
@@ -199,20 +199,20 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define EXPORT_FUN(name) emit_native_x86_##name
 
-#define REG_RET REG_EAX
+#define REG_RET ASM_X86_REG_EAX
 #define REG_ARG_1 ASM_X86_REG_ARG_1
 #define REG_ARG_2 ASM_X86_REG_ARG_2
 #define REG_ARG_3 ASM_X86_REG_ARG_3
 
 // caller-save, so can be used as temporaries
-#define REG_TEMP0 REG_EAX
-#define REG_TEMP1 REG_ECX
-#define REG_TEMP2 REG_EDX
+#define REG_TEMP0 ASM_X86_REG_EAX
+#define REG_TEMP1 ASM_X86_REG_ECX
+#define REG_TEMP2 ASM_X86_REG_EDX
 
 // callee-save, so can be used as locals
-#define REG_LOCAL_1 REG_EBX
-#define REG_LOCAL_2 REG_ESI
-#define REG_LOCAL_3 REG_EDI
+#define REG_LOCAL_1 ASM_X86_REG_EBX
+#define REG_LOCAL_2 ASM_X86_REG_ESI
+#define REG_LOCAL_3 ASM_X86_REG_EDI
 #define REG_LOCAL_NUM (3)
 
 #define ASM_PASS_COMPUTE    ASM_X86_PASS_COMPUTE
@@ -245,7 +245,7 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
         asm_x86_cmp_r32_with_r32(as, reg1, reg2); \
         asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \
     } while (0)
-#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], REG_EAX)
+#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], ASM_X86_REG_EAX)
 
 #define ASM_MOV_REG_TO_LOCAL        asm_x86_mov_r32_to_local
 #define ASM_MOV_IMM_TO_REG          asm_x86_mov_i32_to_r32
@@ -267,19 +267,19 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define EXPORT_FUN(name) emit_native_thumb_##name
 
-#define REG_RET REG_R0
-#define REG_ARG_1 REG_R0
-#define REG_ARG_2 REG_R1
-#define REG_ARG_3 REG_R2
-#define REG_ARG_4 REG_R3
+#define REG_RET ASM_THUMB_REG_R0
+#define REG_ARG_1 ASM_THUMB_REG_R0
+#define REG_ARG_2 ASM_THUMB_REG_R1
+#define REG_ARG_3 ASM_THUMB_REG_R2
+#define REG_ARG_4 ASM_THUMB_REG_R3
 
-#define REG_TEMP0 (REG_R0)
-#define REG_TEMP1 (REG_R1)
-#define REG_TEMP2 (REG_R2)
+#define REG_TEMP0 ASM_THUMB_REG_R0
+#define REG_TEMP1 ASM_THUMB_REG_R1
+#define REG_TEMP2 ASM_THUMB_REG_R2
 
-#define REG_LOCAL_1 (REG_R4)
-#define REG_LOCAL_2 (REG_R5)
-#define REG_LOCAL_3 (REG_R6)
+#define REG_LOCAL_1 ASM_THUMB_REG_R4
+#define REG_LOCAL_2 ASM_THUMB_REG_R5
+#define REG_LOCAL_3 ASM_THUMB_REG_R6
 #define REG_LOCAL_NUM (3)
 
 #define ASM_PASS_COMPUTE    ASM_THUMB_PASS_COMPUTE
@@ -300,19 +300,19 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
     do { \
         asm_thumb_cmp_rlo_i8(as, reg, 0); \
-        asm_thumb_bcc_label(as, THUMB_CC_EQ, label); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
     } while (0)
 #define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
     do { \
         asm_thumb_cmp_rlo_i8(as, reg, 0); \
-        asm_thumb_bcc_label(as, THUMB_CC_NE, label); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \
     } while (0)
 #define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
     do { \
         asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \
-        asm_thumb_bcc_label(as, THUMB_CC_EQ, label); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
     } while (0)
-#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, REG_R3)
+#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, ASM_THUMB_REG_R3)
 
 #define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_thumb_mov_local_reg(as, (local_num), (reg))
 #define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_optimised(as, (reg), (imm))
@@ -334,19 +334,19 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define EXPORT_FUN(name) emit_native_arm_##name
 
-#define REG_RET REG_R0
-#define REG_ARG_1 REG_R0
-#define REG_ARG_2 REG_R1
-#define REG_ARG_3 REG_R2
-#define REG_ARG_4 REG_R3
+#define REG_RET ASM_ARM_REG_R0
+#define REG_ARG_1 ASM_ARM_REG_R0
+#define REG_ARG_2 ASM_ARM_REG_R1
+#define REG_ARG_3 ASM_ARM_REG_R2
+#define REG_ARG_4 ASM_ARM_REG_R3
 
-#define REG_TEMP0 (REG_R0)
-#define REG_TEMP1 (REG_R1)
-#define REG_TEMP2 (REG_R2)
+#define REG_TEMP0 ASM_ARM_REG_R0
+#define REG_TEMP1 ASM_ARM_REG_R1
+#define REG_TEMP2 ASM_ARM_REG_R2
 
-#define REG_LOCAL_1 (REG_R4)
-#define REG_LOCAL_2 (REG_R5)
-#define REG_LOCAL_3 (REG_R6)
+#define REG_LOCAL_1 ASM_ARM_REG_R4
+#define REG_LOCAL_2 ASM_ARM_REG_R5
+#define REG_LOCAL_3 ASM_ARM_REG_R6
 #define REG_LOCAL_NUM (3)
 
 #define ASM_PASS_COMPUTE    ASM_ARM_PASS_COMPUTE
@@ -367,19 +367,19 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 #define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
     do { \
         asm_arm_cmp_reg_i8(as, reg, 0); \
-        asm_arm_bcc_label(as, ARM_CC_EQ, label); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
     } while (0)
 #define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
     do { \
         asm_arm_cmp_reg_i8(as, reg, 0); \
-        asm_arm_bcc_label(as, ARM_CC_NE, label); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \
     } while (0)
 #define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
     do { \
         asm_arm_cmp_reg_reg(as, reg1, reg2); \
-        asm_arm_bcc_label(as, ARM_CC_EQ, label); \
+        asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
     } while (0)
-#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, REG_R3)
+#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, ASM_ARM_REG_R3)
 
 #define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_arm_mov_local_reg(as, (local_num), (reg))
 #define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm))
@@ -585,7 +585,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
         }
     }
 
-    asm_thumb_mov_reg_i32(emit->as, REG_R7, (mp_uint_t)mp_fun_table);
+    asm_thumb_mov_reg_i32(emit->as, ASM_THUMB_REG_R7, (mp_uint_t)mp_fun_table);
 #elif N_ARM
     for (int i = 0; i < scope->num_pos_args; i++) {
         if (i == 0) {
@@ -602,7 +602,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
         }
     }
 
-    asm_arm_mov_reg_i32(emit->as, REG_R7, (mp_uint_t)mp_fun_table);
+    asm_arm_mov_reg_i32(emit->as, ASM_ARM_REG_R7, (mp_uint_t)mp_fun_table);
 #else
     #error not implemented
 #endif
@@ -1072,9 +1072,9 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t id_flags, mp
     } else if (local_num == 2) {
         emit_post_push_reg(emit, vtype, REG_LOCAL_3);
     } else {
-        need_reg_single(emit, REG_RAX, 0);
-        asm_x64_mov_local_to_r64(emit->as, local_num - REG_LOCAL_NUM, REG_RAX);
-        emit_post_push_reg(emit, vtype, REG_RAX);
+        need_reg_single(emit, REG_TEMP0, 0);
+        asm_x64_mov_local_to_r64(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0);
+        emit_post_push_reg(emit, vtype, REG_TEMP0);
     }
 #elif N_X86
     if (local_num == 0) {
@@ -1084,9 +1084,9 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t id_flags, mp
     } else if (local_num == 2) {
         emit_post_push_reg(emit, vtype, REG_LOCAL_3);
     } else {
-        need_reg_single(emit, REG_EAX, 0);
-        asm_x86_mov_local_to_r32(emit->as, local_num - REG_LOCAL_NUM, REG_EAX);
-        emit_post_push_reg(emit, vtype, REG_EAX);
+        need_reg_single(emit, REG_TEMP0, 0);
+        asm_x86_mov_local_to_r32(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0);
+        emit_post_push_reg(emit, vtype, REG_TEMP0);
     }
 #elif N_THUMB
     if (local_num == 0) {
@@ -1096,9 +1096,9 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t id_flags, mp
     } else if (local_num == 2) {
         emit_post_push_reg(emit, vtype, REG_LOCAL_3);
     } else {
-        need_reg_single(emit, REG_R0, 0);
-        asm_thumb_mov_reg_local(emit->as, REG_R0, local_num - REG_LOCAL_NUM);
-        emit_post_push_reg(emit, vtype, REG_R0);
+        need_reg_single(emit, REG_TEMP0, 0);
+        asm_thumb_mov_reg_local(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM);
+        emit_post_push_reg(emit, vtype, REG_TEMP0);
     }
 #elif N_ARM
     if (local_num == 0) {
@@ -1108,9 +1108,9 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t id_flags, mp
     } else if (local_num == 2) {
         emit_post_push_reg(emit, vtype, REG_LOCAL_3);
     } else {
-        need_reg_single(emit, REG_R0, 0);
-        asm_arm_mov_reg_local(emit->as, REG_R0, local_num - REG_LOCAL_NUM);
-        emit_post_push_reg(emit, vtype, REG_R0);
+        need_reg_single(emit, REG_TEMP0, 0);
+        asm_arm_mov_reg_local(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM);
+        emit_post_push_reg(emit, vtype, REG_TEMP0);
     }
 #else
     #error not implemented
@@ -1183,8 +1183,8 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
     } else if (local_num == 2) {
         emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3);
     } else {
-        emit_pre_pop_reg(emit, &vtype, REG_RAX);
-        asm_x64_mov_r64_to_local(emit->as, REG_RAX, local_num - REG_LOCAL_NUM);
+        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+        asm_x64_mov_r64_to_local(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM);
     }
 #elif N_X86
     if (local_num == 0) {
@@ -1194,8 +1194,8 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
     } else if (local_num == 2) {
         emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3);
     } else {
-        emit_pre_pop_reg(emit, &vtype, REG_EAX);
-        asm_x86_mov_r32_to_local(emit->as, REG_EAX, local_num - REG_LOCAL_NUM);
+        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+        asm_x86_mov_r32_to_local(emit->as, REG_TEMP0, local_num - REG_LOCAL_NUM);
     }
 #elif N_THUMB
     if (local_num == 0) {
@@ -1205,8 +1205,8 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
     } else if (local_num == 2) {
         emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3);
     } else {
-        emit_pre_pop_reg(emit, &vtype, REG_R0);
-        asm_thumb_mov_local_reg(emit->as, local_num - REG_LOCAL_NUM, REG_R0);
+        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+        asm_thumb_mov_local_reg(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0);
     }
 #elif N_ARM
     if (local_num == 0) {
@@ -1216,8 +1216,8 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num)
     } else if (local_num == 2) {
         emit_pre_pop_reg(emit, &vtype, REG_LOCAL_3);
     } else {
-        emit_pre_pop_reg(emit, &vtype, REG_R0);
-        asm_arm_mov_local_reg(emit->as, local_num - REG_LOCAL_NUM, REG_R0);
+        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+        asm_arm_mov_local_reg(emit->as, local_num - REG_LOCAL_NUM, REG_TEMP0);
     }
 #else
     #error not implemented
-- 
GitLab