From c90f59ec3a4c77848f36c710ed4ab8d55a944a0c Mon Sep 17 00:00:00 2001
From: Damien George <damien.p.george@gmail.com>
Date: Sat, 6 Sep 2014 23:06:36 +0100
Subject: [PATCH] py: Add support for emitting native x86 machine code.
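
Add a 32-bit x86 assembler (py/asmx86.c and py/asmx86.h), modelled on
the existing x64 assembler, and generalise py/emitnative.c so that all
architecture-specific operations go through a common ASM_* macro
interface instead of per-architecture #if blocks.  Because the 32-bit
x86 calling convention passes arguments on the stack, the x86 emitter
keeps a table (mp_f_n_args) with the argument count of each runtime
helper.  The new emitter is selected with MICROPY_EMIT_X86.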

---
 py/asmx64.h         |  12 +-
 py/asmx86.c         | 520 ++++++++++++++++++++++++++++++++++++++
 py/asmx86.h         |  77 ++++++
 py/compile.c        |   7 +
 py/emit.h           |   3 +
 py/emitnative.c     | 599 ++++++++++++++++++++++++++------------------
 py/mpconfig.h       |   7 +-
 py/py.mk            |   6 +
 py/qstrdefs.h       |   2 +-
 unix/mpconfigport.h |   3 +
 10 files changed, 987 insertions(+), 249 deletions(-)
 create mode 100644 py/asmx86.c
 create mode 100644 py/asmx86.h

diff --git a/py/asmx64.h b/py/asmx64.h
index 1d5d6c7c2..11c0fb516 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -37,12 +37,12 @@
 #define REG_RDI (7)
 
 // condition codes, used for jcc and setcc (despite their j-name!)
-#define JCC_JB  (0x2) // below, unsigned
-#define JCC_JZ  (0x4)
-#define JCC_JE  (0x4)
-#define JCC_JNZ (0x5)
-#define JCC_JNE (0x5)
-#define JCC_JL  (0xc) // less, signed
+#define ASM_X64_CC_JB  (0x2) // below, unsigned
+#define ASM_X64_CC_JZ  (0x4)
+#define ASM_X64_CC_JE  (0x4)
+#define ASM_X64_CC_JNZ (0x5)
+#define ASM_X64_CC_JNE (0x5)
+#define ASM_X64_CC_JL  (0xc) // less, signed
 
 #define REG_RET REG_RAX
 #define REG_ARG_1 REG_RDI
diff --git a/py/asmx86.c b/py/asmx86.c
new file mode 100644
index 000000000..a0a38161c
--- /dev/null
+++ b/py/asmx86.c
@@ -0,0 +1,520 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "mpconfig.h"
+#include "misc.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X86
+
+#include "asmx86.h"
+
+/* all offsets are measured in multiples of 4 bytes */
+#define WORD_SIZE                (4)
+
+#define OPCODE_NOP               (0x90)
+#define OPCODE_PUSH_R32          (0x50)
+//#define OPCODE_PUSH_I32          (0x68)
+//#define OPCODE_PUSH_M32          (0xff) /* /6 */
+#define OPCODE_POP_R32           (0x58)
+#define OPCODE_RET               (0xc3)
+//#define OPCODE_MOV_I8_TO_R8      (0xb0) /* +rb */
+#define OPCODE_MOV_I32_TO_R32    (0xb8)
+//#define OPCODE_MOV_I32_TO_RM32   (0xc7)
+#define OPCODE_MOV_R32_TO_RM32   (0x89)
+#define OPCODE_MOV_RM32_TO_R32   (0x8b)
+#define OPCODE_LEA_MEM_TO_R32    (0x8d) /* /r */
+#define OPCODE_XOR_R32_TO_RM32   (0x31) /* /r */
+#define OPCODE_ADD_R32_TO_RM32   (0x01)
+//#define OPCODE_ADD_I32_TO_RM32   (0x81) /* /0 */
+//#define OPCODE_ADD_I8_TO_RM32    (0x83) /* /0 */
+//#define OPCODE_SUB_R32_FROM_RM32 (0x29)
+#define OPCODE_SUB_I32_FROM_RM32 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM32  (0x83) /* /5 */
+//#define OPCODE_SHL_RM32_BY_I8    (0xc1) /* /4 */
+//#define OPCODE_SHR_RM32_BY_I8    (0xc1) /* /5 */
+//#define OPCODE_SAR_RM32_BY_I8    (0xc1) /* /7 */
+//#define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+//#define OPCODE_CMP_I8_WITH_RM32  (0x83) /* /7 */
+#define OPCODE_CMP_R32_WITH_RM32 (0x39)
+//#define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8  (0x84) /* /r */
+#define OPCODE_JMP_REL8          (0xeb)
+#define OPCODE_JMP_REL32         (0xe9)
+#define OPCODE_JCC_REL8          (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A       (0x0f)
+#define OPCODE_JCC_REL32_B       (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A       (0x0f)
+#define OPCODE_SETCC_RM8_B       (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32        (0xe8)
+#define OPCODE_CALL_RM32         (0xff) /* /2 */
+#define OPCODE_LEAVE             (0xc9)
+
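+// ModR/M byte layout: (mod << 6) | (reg << 3) | (r/m).  MODRM_RM_DISP0/8/32
+// select the mod field for a memory operand with a 0, 8 or 32-bit
+// displacement, and MODRM_RM_REG selects a direct register operand.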
+#define MODRM_R32(x)    ((x) << 3)
+#define MODRM_RM_DISP0  (0x00)
+#define MODRM_RM_DISP8  (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG    (0xc0)
+#define MODRM_RM_R32(x) (x)
+
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
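+// true if the 32-bit value x fits in a signed 8-bit immediate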
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+struct _asm_x86_t {
+    uint pass;
+    mp_uint_t code_offset;
+    mp_uint_t code_size;
+    byte *code_base;
+    byte dummy_data[8];
+
+    uint max_num_labels;
+    int *label_offsets;
+    int num_locals;
+};
+
+asm_x86_t *asm_x86_new(mp_uint_t max_num_labels) {
+    asm_x86_t *as;
+
+    as = m_new0(asm_x86_t, 1);
+    as->max_num_labels = max_num_labels;
+    as->label_offsets = m_new(int, max_num_labels);
+
+    return as;
+}
+
+void asm_x86_free(asm_x86_t *as, bool free_code) {
+    if (free_code) {
+        MP_PLAT_FREE_EXEC(as->code_base, as->code_size);
+    }
+    m_del_obj(asm_x86_t, as);
+}
+
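+// The assembler makes two passes over the code: ASM_X86_PASS_COMPUTE sizes
+// the code and records label offsets (byte writes go to dummy_data), then
+// ASM_X86_PASS_EMIT writes the final bytes into the executable buffer that
+// is allocated at the end of the compute pass.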
+void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass) {
+    as->pass = pass;
+    as->code_offset = 0;
+    if (pass == ASM_X86_PASS_COMPUTE) {
+        // reset all labels
+        memset(as->label_offsets, -1, as->max_num_labels * sizeof(int));
+    }
+}
+
+void asm_x86_end_pass(asm_x86_t *as) {
+    if (as->pass == ASM_X86_PASS_COMPUTE) {
+        MP_PLAT_ALLOC_EXEC(as->code_offset, (void**) &as->code_base, &as->code_size);
+        if (as->code_base == NULL) {
+            assert(0);
+        }
+    }
+}
+
+// all functions must go through this one to emit bytes
+STATIC byte *asm_x86_get_cur_to_write_bytes(asm_x86_t *as, int num_bytes_to_write) {
+    //printf("emit %d\n", num_bytes_to_write);
+    if (as->pass < ASM_X86_PASS_EMIT) {
+        as->code_offset += num_bytes_to_write;
+        return as->dummy_data;
+    } else {
+        assert(as->code_offset + num_bytes_to_write <= as->code_size);
+        byte *c = as->code_base + as->code_offset;
+        as->code_offset += num_bytes_to_write;
+        return c;
+    }
+}
+
+mp_uint_t asm_x86_get_code_size(asm_x86_t *as) {
+    return as->code_size;
+}
+
+void *asm_x86_get_code(asm_x86_t *as) {
+    return as->code_base;
+}
+
+STATIC void asm_x86_write_byte_1(asm_x86_t *as, byte b1) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 1);
+    c[0] = b1;
+}
+
+STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 2);
+    c[0] = b1;
+    c[1] = b2;
+}
+
+STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 3);
+    c[0] = b1;
+    c[1] = b2;
+    c[2] = b3;
+}
+
+STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) {
+    byte* c = asm_x86_get_cur_to_write_bytes(as, 4);
+    c[0] = IMM32_L0(w32);
+    c[1] = IMM32_L1(w32);
+    c[2] = IMM32_L2(w32);
+    c[3] = IMM32_L3(w32);
+}
+
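+// Write a ModR/M byte plus displacement for a [disp_r32 + disp_offset]
+// operand, picking the shortest displacement encoding.  ESP can't be used as
+// a base register here (it would need a SIB byte), and EBP with mod=00 means
+// "disp32 with no base", hence the special cases.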
+STATIC void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) {
+    assert(disp_r32 != REG_ESP);
+
+    if (disp_offset == 0 && disp_r32 != REG_EBP) {
+        asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP0 | MODRM_RM_R32(disp_r32));
+    } else if (SIGNED_FIT8(disp_offset)) {
+        asm_x86_write_byte_2(as, MODRM_R32(r32) | MODRM_RM_DISP8 | MODRM_RM_R32(disp_r32), IMM32_L0(disp_offset));
+    } else {
+        asm_x86_write_byte_1(as, MODRM_R32(r32) | MODRM_RM_DISP32 | MODRM_RM_R32(disp_r32));
+        asm_x86_write_word32(as, disp_offset);
+    }
+}
+
+STATIC void asm_x86_nop(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_NOP);
+}
+
+STATIC void asm_x86_push_r32(asm_x86_t *as, int src_r32) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_R32 | src_r32);
+}
+
+#if 0
+void asm_x86_push_i32(asm_x86_t *as, int src_i32) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_I32);
+    asm_x86_write_word32(as, src_i32);
+}
+
+void asm_x86_push_disp(asm_x86_t *as, int src_r32, int src_offset) {
+    asm_x86_write_byte_1(as, OPCODE_PUSH_M32);
+    asm_x86_write_r32_disp(as, 6, src_r32, src_offset);
+}
+#endif
+
+STATIC void asm_x86_pop_r32(asm_x86_t *as, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_POP_R32 | dest_r32);
+}
+
+STATIC void asm_x86_ret(asm_x86_t *as) {
+    asm_x86_write_byte_1(as, OPCODE_RET);
+}
+
+void asm_x86_mov_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_MOV_R32_TO_RM32, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+STATIC void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
+    asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+STATIC void asm_x86_mov_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_RM32_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+STATIC void asm_x86_lea_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_LEA_MEM_TO_R32);
+    asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+#if 0
+void asm_x86_mov_i8_to_r8(asm_x86_t *as, int src_i8, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r32, src_i8);
+}
+#endif
+
+void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_I32_TO_R32 | dest_r32);
+    asm_x86_write_word32(as, src_i32);
+}
+
+// src_i32 is stored as a full word in the code, aligned to a machine-word boundary
+void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    // mov instruction uses 1 byte for the instruction, before the i32
+    while (((as->code_offset + 1) & (WORD_SIZE - 1)) != 0) {
+        asm_x86_nop(as);
+    }
+    asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
+}
+
+void asm_x86_xor_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_XOR_R32_TO_RM32, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+void asm_x86_add_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_ADD_R32_TO_RM32, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+#if 0
+void asm_x86_add_i32_to_r32(asm_x86_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+void asm_x86_sub_r32_from_r32(asm_x86_t *as, int src_r32, int dest_r32) {
+    asm_x86_write_byte_2(as, OPCODE_SUB_R32_FROM_RM32, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+#endif
+
+void asm_x86_sub_i32_from_r32(asm_x86_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I8_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I32_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+#if 0
+/* shifts not tested */
+void asm_x86_shl_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_shr_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_sar_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+    asm_x86_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(r32));
+    asm_x86_write_byte_1(as, imm);
+}
+#endif
+
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    asm_x86_write_byte_2(as, OPCODE_CMP_R32_WITH_RM32, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+#if 0
+void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+#endif
+
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+    // TODO implement for other registers
+    assert(src_r32_a == REG_EAX);
+    assert(src_r32_b == REG_EAX);
+    asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) {
+    asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8));
+}
+
+void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label) {
+    assert(label < as->max_num_labels);
+    if (as->pass < ASM_X86_PASS_EMIT) {
+        // assign label offset
+        assert(as->label_offsets[label] == -1);
+        as->label_offsets[label] = as->code_offset;
+    } else {
+        // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+        //printf("l%d: (at %d=%ld)\n", label, as->label_offsets[label], as->code_offset);
+        assert(as->label_offsets[label] == as->code_offset);
+    }
+}
+
+STATIC int get_label_dest(asm_x86_t *as, int label) {
+    assert(label < as->max_num_labels);
+    return as->label_offsets[label];
+}
+
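+// Jumps are sized on the first pass: a backwards jump has a known target and
+// can use the short rel8 form when it fits; a forwards jump's target is not
+// yet known, so the long rel32 form must be assumed.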
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
+    int dest = get_label_dest(as, label);
+    int rel = dest - as->code_offset;
+    if (dest >= 0 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        rel -= 5;
+        asm_x86_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
+    int dest = get_label_dest(as, label);
+    int rel = dest - as->code_offset;
+    if (dest >= 0 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2;
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2;
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        rel -= 6;
+        asm_x86_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
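+// Standard cdecl prologue: save EBP, establish the frame pointer, reserve
+// stack space for the locals, then save EBX (callee-saved; the native
+// emitter uses it as REG_LOCAL_1).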
+void asm_x86_entry(asm_x86_t *as, mp_uint_t num_locals) {
+    asm_x86_push_r32(as, REG_EBP);
+    asm_x86_mov_r32_to_r32(as, REG_ESP, REG_EBP);
+    asm_x86_sub_i32_from_r32(as, num_locals * WORD_SIZE, REG_ESP);
+    asm_x86_push_r32(as, REG_EBX);
+    as->num_locals = num_locals;
+}
+
+void asm_x86_exit(asm_x86_t *as) {
+    asm_x86_pop_r32(as, REG_EBX);
+    asm_x86_write_byte_1(as, OPCODE_LEAVE);
+    asm_x86_ret(as);
+}
+
+#if 0
+void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
+    assert(0);
+    asm_x86_push_disp(as, REG_EBP, 8 + src_arg_num * WORD_SIZE);
+}
+
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
+    assert(0);
+    //asm_x86_mov_disp_to_r32(as, REG_EBP, 8 + src_arg_num * WORD_SIZE, dest_r32);
+}
+
+void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
+    assert(0);
+    //asm_x86_mov_r32_to_disp(as, src_r32, REG_EBP, 8 + dest_arg_num * WORD_SIZE);
+}
+#endif
+
+// locals:
+//  - stored on the stack in ascending order
+//  - numbered 0 through as->num_locals-1
+//  - EBP points above the last local
+//
+//                          | EBP
+//                          v
+//  l0  l1  l2  ...  l(n-1)
+//  ^                ^
+//  | low address    | high address in RAM
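+//  e.g. with num_locals = 3: local 0 is at [EBP-12], local 2 at [EBP-4]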
+//
+STATIC int asm_x86_local_offset_from_ebp(asm_x86_t *as, int local_num) {
+    return (-as->num_locals + local_num) * WORD_SIZE;
+}
+
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
+    asm_x86_mov_disp_to_r32(as, REG_EBP, asm_x86_local_offset_from_ebp(as, src_local_num), dest_r32);
+}
+
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
+    asm_x86_mov_r32_to_disp(as, src_r32, REG_EBP, asm_x86_local_offset_from_ebp(as, dest_local_num));
+}
+
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
+    int offset = asm_x86_local_offset_from_ebp(as, local_num);
+    if (offset == 0) {
+        asm_x86_mov_r32_to_r32(as, REG_EBP, dest_r32);
+    } else {
+        asm_x86_lea_disp_to_r32(as, REG_EBP, offset, dest_r32);
+    }
+}
+
+#if 0
+void asm_x86_push_local(asm_x86_t *as, int local_num) {
+    asm_x86_push_disp(as, REG_EBP, asm_x86_local_offset_from_ebp(as, local_num));
+}
+
+void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32) {
+    asm_x86_mov_r32_to_r32(as, REG_EBP, temp_r32);
+    asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_ebp(as, local_num), temp_r32);
+    asm_x86_push_r32(as, temp_r32);
+}
+#endif
+
+void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32) {
+    assert(n_args <= 3);
+    if (n_args > 2) {
+        asm_x86_push_r32(as, REG_ARG_3);
+    }
+    if (n_args > 1) {
+        asm_x86_push_r32(as, REG_ARG_2);
+    }
+    if (n_args > 0) {
+        asm_x86_push_r32(as, REG_ARG_1);
+    }
+#ifdef __LP64__
+    // We never run x86 code on an x64 machine; this truncating cast is here
+    // only so the x86 emitter itself can be compiled and tested there.
+    asm_x86_mov_i32_to_r32(as, (int32_t)(int64_t)ptr, temp_r32);
+#else
+    // If we get here, sizeof(int) == sizeof(void*).
+    asm_x86_mov_i32_to_r32(as, (int32_t)ptr, temp_r32);
+#endif
+    asm_x86_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R32(2) | MODRM_RM_REG | MODRM_RM_R32(temp_r32));
+    // an alternative encoding: it reduces code size by 2 bytes per call, but
+    // doesn't seem to speed it up at all
+    /*
+    asm_x86_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x86_write_word32(as, ptr - (void*)(as->code_base + as->code_offset + 4));
+    */
+
+    // in cdecl the caller must remove the arguments from the stack; subtract
+    // a negative amount, since asm_x86_add_i32_to_r32 is compiled out above
+    if (n_args > 0) {
+        asm_x86_sub_i32_from_r32(as, -(int)(n_args * WORD_SIZE), REG_ESP);
+    }
+}
+
+#endif // MICROPY_EMIT_X86
diff --git a/py/asmx86.h b/py/asmx86.h
new file mode 100644
index 000000000..5d0fc70e3
--- /dev/null
+++ b/py/asmx86.h
@@ -0,0 +1,77 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#define ASM_X86_PASS_COMPUTE (1)
+#define ASM_X86_PASS_EMIT    (2)
+
+#define REG_EAX (0)
+#define REG_ECX (1)
+#define REG_EDX (2)
+#define REG_EBX (3)
+#define REG_ESP (4)
+#define REG_EBP (5)
+#define REG_ESI (6)
+#define REG_EDI (7)
+
+// condition codes, used for jcc and setcc (despite their j-name!)
+#define ASM_X86_CC_JB  (0x2) // below, unsigned
+#define ASM_X86_CC_JZ  (0x4)
+#define ASM_X86_CC_JE  (0x4)
+#define ASM_X86_CC_JNZ (0x5)
+#define ASM_X86_CC_JNE (0x5)
+#define ASM_X86_CC_JL  (0xc) // less, signed
+
+#define REG_RET REG_EAX
+#define REG_ARG_1 REG_EDI
+#define REG_ARG_2 REG_ESI
+#define REG_ARG_3 REG_EDX
+
+typedef struct _asm_x86_t asm_x86_t;
+
+asm_x86_t *asm_x86_new(mp_uint_t max_num_labels);
+void asm_x86_free(asm_x86_t *as, bool free_code);
+void asm_x86_start_pass(asm_x86_t *as, mp_uint_t pass);
+void asm_x86_end_pass(asm_x86_t *as);
+mp_uint_t asm_x86_get_code_size(asm_x86_t *as);
+void *asm_x86_get_code(asm_x86_t *as);
+
+void asm_x86_mov_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32);
+void asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
+void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32);
+void asm_x86_xor_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32);
+void asm_x86_add_r32_to_r32(asm_x86_t *as, int src_r32, int dest_r32);
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8);
+void asm_x86_label_assign(asm_x86_t *as, mp_uint_t label);
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label);
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label);
+void asm_x86_entry(asm_x86_t *as, mp_uint_t num_locals);
+void asm_x86_exit(asm_x86_t *as);
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32);
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num);
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32);
+void asm_x86_call_ind(asm_x86_t *as, void *ptr, mp_uint_t n_args, int temp_r32);
diff --git a/py/compile.c b/py/compile.c
index d92018703..7056c3496 100644
--- a/py/compile.c
+++ b/py/compile.c
@@ -3638,6 +3638,11 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
                         emit_native = emit_native_x64_new(max_num_labels);
                     }
                     comp->emit_method_table = &emit_native_x64_method_table;
+#elif MICROPY_EMIT_X86
+                    if (emit_native == NULL) {
+                        emit_native = emit_native_x86_new(max_num_labels);
+                    }
+                    comp->emit_method_table = &emit_native_x86_method_table;
 #elif MICROPY_EMIT_THUMB
                     if (emit_native == NULL) {
                         emit_native = emit_native_thumb_new(max_num_labels);
@@ -3689,6 +3694,8 @@ mp_obj_t mp_compile(mp_parse_node_t pn, qstr source_file, uint emit_opt, bool is
     if (emit_native != NULL) {
 #if MICROPY_EMIT_X64
         emit_native_x64_free(emit_native);
+#elif MICROPY_EMIT_X86
+        emit_native_x86_free(emit_native);
 #elif MICROPY_EMIT_THUMB
         emit_native_thumb_free(emit_native);
 #elif MICROPY_EMIT_ARM
diff --git a/py/emit.h b/py/emit.h
index 275ea494d..2c0f1bca9 100644
--- a/py/emit.h
+++ b/py/emit.h
@@ -160,6 +160,7 @@ extern const emit_method_table_t emit_pass1_method_table;
 extern const emit_method_table_t emit_cpython_method_table;
 extern const emit_method_table_t emit_bc_method_table;
 extern const emit_method_table_t emit_native_x64_method_table;
+extern const emit_method_table_t emit_native_x86_method_table;
 extern const emit_method_table_t emit_native_thumb_method_table;
 extern const emit_method_table_t emit_native_arm_method_table;
 
@@ -167,12 +168,14 @@ emit_t *emit_pass1_new(void);
 emit_t *emit_cpython_new(uint max_num_labels);
 emit_t *emit_bc_new(uint max_num_labels);
 emit_t *emit_native_x64_new(uint max_num_labels);
+emit_t *emit_native_x86_new(uint max_num_labels);
 emit_t *emit_native_thumb_new(uint max_num_labels);
 emit_t *emit_native_arm_new(uint max_num_labels);
 
 void emit_pass1_free(emit_t *emit);
 void emit_bc_free(emit_t *emit);
 void emit_native_x64_free(emit_t *emit);
+void emit_native_x86_free(emit_t *emit);
 void emit_native_thumb_free(emit_t *emit);
 void emit_native_arm_free(emit_t *emit);
 
diff --git a/py/emitnative.c b/py/emitnative.c
index c24f38069..782f4b60d 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -69,7 +69,10 @@
 #endif
 
 // wrapper around everything in this file
-#if (MICROPY_EMIT_X64 && N_X64) || (MICROPY_EMIT_THUMB && N_THUMB) || (MICROPY_EMIT_ARM && N_ARM)
+#if (MICROPY_EMIT_X64 && N_X64) \
+    || (MICROPY_EMIT_X86 && N_X86) \
+    || (MICROPY_EMIT_THUMB && N_THUMB) \
+    || (MICROPY_EMIT_ARM && N_ARM)
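+
+// Each architecture below provides the same ASM_* macro interface (ASM_T,
+// ASM_ENTRY, ASM_JUMP, ASM_CALL_IND, ASM_MOV_* and friends), so the emitter
+// code that follows can be written once and shared by all targets.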
 
 #if N_X64
 
@@ -77,21 +80,165 @@
 
 #include "asmx64.h"
 
-#define REG_LOCAL_1 (REG_RBX)
-#define REG_LOCAL_NUM (1)
-
 #define EXPORT_FUN(name) emit_native_x64_##name
 
 #define REG_TEMP0 (REG_RAX)
 #define REG_TEMP1 (REG_RDI)
 #define REG_TEMP2 (REG_RSI)
-#define ASM_MOV_REG_TO_LOCAL(reg, local_num) asm_x64_mov_r64_to_local(emit->as, (reg), (local_num))
-#define ASM_MOV_IMM_TO_REG(imm, reg) asm_x64_mov_i64_to_r64_optimised(emit->as, (imm), (reg))
-#define ASM_MOV_ALIGNED_IMM_TO_REG(imm, reg) asm_x64_mov_i64_to_r64_aligned(emit->as, (imm), (reg))
-#define ASM_MOV_IMM_TO_LOCAL_USING(imm, local_num, reg_temp) do { asm_x64_mov_i64_to_r64_optimised(emit->as, (imm), (reg_temp)); asm_x64_mov_r64_to_local(emit->as, (reg_temp), (local_num)); } while (false)
-#define ASM_MOV_LOCAL_TO_REG(local_num, reg) asm_x64_mov_local_to_r64(emit->as, (local_num), (reg))
-#define ASM_MOV_REG_TO_REG(reg_src, reg_dest) asm_x64_mov_r64_to_r64(emit->as, (reg_src), (reg_dest))
-#define ASM_MOV_LOCAL_ADDR_TO_REG(local_num, reg) asm_x64_mov_local_addr_to_r64(emit->as, (local_num), (reg))
+
+#define REG_LOCAL_1 (REG_RBX)
+#define REG_LOCAL_NUM (1)
+
+#define ASM_PASS_COMPUTE    ASM_X64_PASS_COMPUTE
+#define ASM_PASS_EMIT       ASM_X64_PASS_EMIT
+
+#define ASM_T               asm_x64_t
+#define ASM_NEW             asm_x64_new
+#define ASM_FREE            asm_x64_free
+#define ASM_GET_CODE        asm_x64_get_code
+#define ASM_GET_CODE_SIZE   asm_x64_get_code_size
+#define ASM_START_PASS      asm_x64_start_pass
+#define ASM_END_PASS        asm_x64_end_pass
+#define ASM_ENTRY           asm_x64_entry
+#define ASM_EXIT            asm_x64_exit
+
+#define ASM_LABEL_ASSIGN    asm_x64_label_assign
+#define ASM_JUMP            asm_x64_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+    do { \
+        asm_x64_test_r8_with_r8(as, reg, reg); \
+        asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+    do { \
+        asm_x64_test_r8_with_r8(as, reg, reg); \
+        asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_x64_cmp_r64_with_r64(as, reg1, reg2); \
+        asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \
+    } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_x64_call_ind(as, ptr, REG_RAX)
+
+#define ASM_MOV_REG_TO_LOCAL        asm_x64_mov_r64_to_local
+#define ASM_MOV_IMM_TO_REG          asm_x64_mov_i64_to_r64_optimised
+#define ASM_MOV_ALIGNED_IMM_TO_REG  asm_x64_mov_i64_to_r64_aligned
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+    do { \
+        asm_x64_mov_i64_to_r64_optimised(as, (imm), (reg_temp)); \
+        asm_x64_mov_r64_to_local(as, (reg_temp), (local_num)); \
+    } while (false)
+#define ASM_MOV_LOCAL_TO_REG        asm_x64_mov_local_to_r64
+#define ASM_MOV_REG_TO_REG          asm_x64_mov_r64_to_r64
+#define ASM_MOV_LOCAL_ADDR_TO_REG   asm_x64_mov_local_addr_to_r64
+
+#elif N_X86
+
+// x86 specific stuff
+
+#include "asmx86.h"
+
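+// The 32-bit x86 ABI passes arguments on the stack, so asm_x86_call_ind has
+// to know how many arguments each runtime helper takes; this table records
+// that count for every entry of mp_fun_table.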
+STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
+    [MP_F_CONVERT_OBJ_TO_NATIVE] = 2,
+    [MP_F_CONVERT_NATIVE_TO_OBJ] = 2,
+    [MP_F_LOAD_CONST_INT] = 1,
+    [MP_F_LOAD_CONST_DEC] = 1,
+    [MP_F_LOAD_CONST_STR] = 1,
+    [MP_F_LOAD_CONST_BYTES] = 1,
+    [MP_F_LOAD_NAME] = 1,
+    [MP_F_LOAD_GLOBAL] = 1,
+    [MP_F_LOAD_BUILD_CLASS] = 0,
+    [MP_F_LOAD_ATTR] = 2,
+    [MP_F_LOAD_METHOD] = 3,
+    [MP_F_STORE_NAME] = 2,
+    [MP_F_STORE_GLOBAL] = 2,
+    [MP_F_STORE_ATTR] = 3,
+    [MP_F_OBJ_SUBSCR] = 3,
+    [MP_F_OBJ_IS_TRUE] = 1,
+    [MP_F_UNARY_OP] = 2,
+    [MP_F_BINARY_OP] = 3,
+    [MP_F_BUILD_TUPLE] = 2,
+    [MP_F_BUILD_LIST] = 2,
+    [MP_F_LIST_APPEND] = 2,
+    [MP_F_BUILD_MAP] = 1,
+    [MP_F_STORE_MAP] = 3,
+#if MICROPY_PY_BUILTINS_SET
+    [MP_F_BUILD_SET] = 2,
+    [MP_F_STORE_SET] = 2,
+#endif
+    [MP_F_MAKE_FUNCTION_FROM_RAW_CODE] = 3,
+    [MP_F_NATIVE_CALL_FUNCTION_N_KW] = 3,
+    [MP_F_CALL_METHOD_N_KW] = 3,
+    [MP_F_GETITER] = 1,
+    [MP_F_ITERNEXT] = 1,
+    [MP_F_NLR_PUSH] = 1,
+    [MP_F_NLR_POP] = 0,
+    [MP_F_NATIVE_RAISE] = 1,
+    [MP_F_IMPORT_NAME] = 3,
+    [MP_F_IMPORT_FROM] = 2,
+    [MP_F_IMPORT_ALL] = 1,
+#if MICROPY_PY_BUILTINS_SLICE
+    [MP_F_NEW_SLICE] = 3,
+#endif
+    [MP_F_UNPACK_SEQUENCE] = 3,
+    [MP_F_UNPACK_EX] = 3,
+    [MP_F_DELETE_NAME] = 1,
+    [MP_F_DELETE_GLOBAL] = 1,
+};
+
+#define EXPORT_FUN(name) emit_native_x86_##name
+
+#define REG_TEMP0 (REG_EAX)
+#define REG_TEMP1 (REG_EDI)
+#define REG_TEMP2 (REG_ESI)
+
+#define REG_LOCAL_1 (REG_EBX)
+#define REG_LOCAL_NUM (1)
+
+#define ASM_PASS_COMPUTE    ASM_X86_PASS_COMPUTE
+#define ASM_PASS_EMIT       ASM_X86_PASS_EMIT
+
+#define ASM_T               asm_x86_t
+#define ASM_NEW             asm_x86_new
+#define ASM_FREE            asm_x86_free
+#define ASM_GET_CODE        asm_x86_get_code
+#define ASM_GET_CODE_SIZE   asm_x86_get_code_size
+#define ASM_START_PASS      asm_x86_start_pass
+#define ASM_END_PASS        asm_x86_end_pass
+#define ASM_ENTRY           asm_x86_entry
+#define ASM_EXIT            asm_x86_exit
+
+#define ASM_LABEL_ASSIGN    asm_x86_label_assign
+#define ASM_JUMP            asm_x86_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+    do { \
+        asm_x86_test_r8_with_r8(as, reg, reg); \
+        asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+    do { \
+        asm_x86_test_r8_with_r8(as, reg, reg); \
+        asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_x86_cmp_r32_with_r32(as, reg1, reg2); \
+        asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \
+    } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_x86_call_ind(as, ptr, mp_f_n_args[idx], REG_EAX)
+
+#define ASM_MOV_REG_TO_LOCAL        asm_x86_mov_r32_to_local
+#define ASM_MOV_IMM_TO_REG          asm_x86_mov_i32_to_r32
+#define ASM_MOV_ALIGNED_IMM_TO_REG  asm_x86_mov_i32_to_r32_aligned
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+    do { \
+        asm_x86_mov_i32_to_r32(as, (imm), (reg_temp)); \
+        asm_x86_mov_r32_to_local(as, (reg_temp), (local_num)); \
+    } while (false)
+#define ASM_MOV_LOCAL_TO_REG        asm_x86_mov_local_to_r32
+#define ASM_MOV_REG_TO_REG          asm_x86_mov_r32_to_r32
+#define ASM_MOV_LOCAL_ADDR_TO_REG   asm_x86_mov_local_addr_to_r32
 
 #elif N_THUMB
 
@@ -99,23 +246,60 @@
 
 #include "asmthumb.h"
 
-#define REG_LOCAL_1 (REG_R4)
-#define REG_LOCAL_2 (REG_R5)
-#define REG_LOCAL_3 (REG_R6)
-#define REG_LOCAL_NUM (3)
-
 #define EXPORT_FUN(name) emit_native_thumb_##name
 
 #define REG_TEMP0 (REG_R0)
 #define REG_TEMP1 (REG_R1)
 #define REG_TEMP2 (REG_R2)
-#define ASM_MOV_REG_TO_LOCAL(reg, local_num) asm_thumb_mov_local_reg(emit->as, (local_num), (reg))
-#define ASM_MOV_IMM_TO_REG(imm, reg) asm_thumb_mov_reg_i32_optimised(emit->as, (reg), (imm))
-#define ASM_MOV_ALIGNED_IMM_TO_REG(imm, reg) asm_thumb_mov_reg_i32_aligned(emit->as, (reg), (imm))
-#define ASM_MOV_IMM_TO_LOCAL_USING(imm, local_num, reg_temp) do { asm_thumb_mov_reg_i32_optimised(emit->as, (reg_temp), (imm)); asm_thumb_mov_local_reg(emit->as, (local_num), (reg_temp)); } while (false)
-#define ASM_MOV_LOCAL_TO_REG(local_num, reg) asm_thumb_mov_reg_local(emit->as, (reg), (local_num))
-#define ASM_MOV_REG_TO_REG(reg_src, reg_dest) asm_thumb_mov_reg_reg(emit->as, (reg_dest), (reg_src))
-#define ASM_MOV_LOCAL_ADDR_TO_REG(local_num, reg) asm_thumb_mov_reg_local_addr(emit->as, (reg), (local_num))
+
+#define REG_LOCAL_1 (REG_R4)
+#define REG_LOCAL_2 (REG_R5)
+#define REG_LOCAL_3 (REG_R6)
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE    ASM_THUMB_PASS_COMPUTE
+#define ASM_PASS_EMIT       ASM_THUMB_PASS_EMIT
+
+#define ASM_T               asm_thumb_t
+#define ASM_NEW             asm_thumb_new
+#define ASM_FREE            asm_thumb_free
+#define ASM_GET_CODE        asm_thumb_get_code
+#define ASM_GET_CODE_SIZE   asm_thumb_get_code_size
+#define ASM_START_PASS      asm_thumb_start_pass
+#define ASM_END_PASS        asm_thumb_end_pass
+#define ASM_ENTRY           asm_thumb_entry
+#define ASM_EXIT            asm_thumb_exit
+
+#define ASM_LABEL_ASSIGN    asm_thumb_label_assign
+#define ASM_JUMP            asm_thumb_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, THUMB_CC_NE, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \
+        asm_thumb_bcc_label(as, THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_thumb_bl_ind(as, ptr, idx, REG_R3)
+
+#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_thumb_mov_local_reg(as, (local_num), (reg))
+#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_optimised(as, (reg), (imm))
+#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_thumb_mov_reg_i32_aligned(as, (reg), (imm))
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+    do { \
+        asm_thumb_mov_reg_i32_optimised(as, (reg_temp), (imm)); \
+        asm_thumb_mov_local_reg(as, (local_num), (reg_temp)); \
+    } while (false)
+#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local(as, (reg), (local_num))
+#define ASM_MOV_REG_TO_REG(as, reg_src, reg_dest) asm_thumb_mov_reg_reg(as, (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_thumb_mov_reg_local_addr(as, (reg), (local_num))
 
 #elif N_ARM
 
@@ -123,23 +307,64 @@
 
 #include "asmarm.h"
 
-#define REG_LOCAL_1 (REG_R4)
-#define REG_LOCAL_2 (REG_R5)
-#define REG_LOCAL_3 (REG_R6)
-#define REG_LOCAL_NUM (3)
-
 #define EXPORT_FUN(name) emit_native_arm_##name
 
 #define REG_TEMP0 (REG_R0)
 #define REG_TEMP1 (REG_R1)
 #define REG_TEMP2 (REG_R2)
-#define ASM_MOV_REG_TO_LOCAL(reg, local_num) asm_arm_mov_local_reg(emit->as, (local_num), (reg))
-#define ASM_MOV_IMM_TO_REG(imm, reg) asm_arm_mov_reg_i32(emit->as, (reg), (imm))
-#define ASM_MOV_ALIGNED_IMM_TO_REG(imm, reg) asm_arm_mov_reg_i32(emit->as, (reg), (imm))
-#define ASM_MOV_IMM_TO_LOCAL_USING(imm, local_num, reg_temp) do { asm_arm_mov_reg_i32(emit->as, (reg_temp), (imm)); asm_arm_mov_local_reg(emit->as, (local_num), (reg_temp)); } while (false)
-#define ASM_MOV_LOCAL_TO_REG(local_num, reg) asm_arm_mov_reg_local(emit->as, (reg), (local_num))
-#define ASM_MOV_REG_TO_REG(reg_src, reg_dest) asm_arm_mov_reg_reg(emit->as, (reg_dest), (reg_src))
-#define ASM_MOV_LOCAL_ADDR_TO_REG(local_num, reg) asm_arm_mov_reg_local_addr(emit->as, (reg), (local_num))
+
+#define REG_LOCAL_1 (REG_R4)
+#define REG_LOCAL_2 (REG_R5)
+#define REG_LOCAL_3 (REG_R6)
+#define REG_LOCAL_NUM (3)
+
+#define ASM_PASS_COMPUTE    ASM_ARM_PASS_COMPUTE
+#define ASM_PASS_EMIT       ASM_ARM_PASS_EMIT
+
+#define ASM_T               asm_arm_t
+#define ASM_NEW             asm_arm_new
+#define ASM_FREE            asm_arm_free
+#define ASM_GET_CODE        asm_arm_get_code
+#define ASM_GET_CODE_SIZE   asm_arm_get_code_size
+#define ASM_START_PASS      asm_arm_start_pass
+#define ASM_END_PASS        asm_arm_end_pass
+#define ASM_ENTRY           asm_arm_entry
+#define ASM_EXIT            asm_arm_exit
+
+#define ASM_LABEL_ASSIGN    asm_arm_label_assign
+#define ASM_JUMP            asm_arm_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label) \
+    do { \
+        asm_arm_cmp_reg_i8(as, reg, 0); \
+        asm_arm_bcc_label(as, ARM_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
+    do { \
+        asm_arm_cmp_reg_i8(as, reg, 0); \
+        asm_arm_bcc_label(as, ARM_CC_NE, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_arm_cmp_reg_reg(as, reg1, reg2); \
+        asm_arm_bcc_label(as, ARM_CC_EQ, label); \
+    } while (0)
+#define ASM_CALL_IND(as, ptr, idx) asm_arm_bl_ind(as, ptr, idx, REG_R3)
+
+#define ASM_MOV_REG_TO_LOCAL(as, reg, local_num) asm_arm_mov_local_reg(as, (local_num), (reg))
+#define ASM_MOV_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm))
+#define ASM_MOV_ALIGNED_IMM_TO_REG(as, imm, reg) asm_arm_mov_reg_i32(as, (reg), (imm))
+#define ASM_MOV_IMM_TO_LOCAL_USING(as, imm, local_num, reg_temp) \
+    do { \
+        asm_arm_mov_reg_i32(as, (reg_temp), (imm)); \
+        asm_arm_mov_local_reg(as, (local_num), (reg_temp)); \
+    } while (false)
+#define ASM_MOV_LOCAL_TO_REG(as, local_num, reg) asm_arm_mov_reg_local(as, (reg), (local_num))
+#define ASM_MOV_REG_TO_REG(as, reg_src, reg_dest) asm_arm_mov_reg_reg(as, (reg_dest), (reg_src))
+#define ASM_MOV_LOCAL_ADDR_TO_REG(as, local_num, reg) asm_arm_mov_reg_local_addr(as, (reg), (local_num))
+
+#else
+
+#error unknown native emitter
 
 #endif
 
@@ -189,35 +414,17 @@ struct _emit_t {
 
     scope_t *scope;
 
-#if N_X64
-    asm_x64_t *as;
-#elif N_THUMB
-    asm_thumb_t *as;
-#elif N_ARM
-    asm_arm_t *as;
-#endif
+    ASM_T *as;
 };
 
 emit_t *EXPORT_FUN(new)(uint max_num_labels) {
     emit_t *emit = m_new0(emit_t, 1);
-#if N_X64
-    emit->as = asm_x64_new(max_num_labels);
-#elif N_THUMB
-    emit->as = asm_thumb_new(max_num_labels);
-#elif N_ARM
-    emit->as = asm_arm_new(max_num_labels);
-#endif
+    emit->as = ASM_NEW(max_num_labels);
     return emit;
 }
 
 void EXPORT_FUN(free)(emit_t *emit) {
-#if N_X64
-    asm_x64_free(emit->as, false);
-#elif N_THUMB
-    asm_thumb_free(emit->as, false);
-#elif N_ARM
-    asm_arm_free(emit->as, false);
-#endif
+    ASM_FREE(emit->as, false);
     m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc);
     m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc);
     m_del_obj(emit_t, emit);
@@ -286,13 +493,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
         emit->stack_info[i].vtype = VTYPE_UNBOUND;
     }
 
-#if N_X64
-    asm_x64_start_pass(emit->as, pass == MP_PASS_EMIT ? ASM_X64_PASS_EMIT : ASM_X64_PASS_COMPUTE);
-#elif N_THUMB
-    asm_thumb_start_pass(emit->as, pass == MP_PASS_EMIT ? ASM_THUMB_PASS_EMIT : ASM_THUMB_PASS_COMPUTE);
-#elif N_ARM
-    asm_arm_start_pass(emit->as, pass == MP_PASS_EMIT ? ASM_ARM_PASS_EMIT : ASM_ARM_PASS_COMPUTE);
-#endif
+    ASM_START_PASS(emit->as, pass == MP_PASS_EMIT ? ASM_PASS_EMIT : ASM_PASS_COMPUTE);
 
     // entry to function
     int num_locals = 0;
@@ -304,13 +505,7 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
         emit->stack_start = num_locals;
         num_locals += scope->stack_size;
     }
-#if N_X64
-    asm_x64_entry(emit->as, num_locals);
-#elif N_THUMB
-    asm_thumb_entry(emit->as, num_locals);
-#elif N_ARM
-    asm_arm_entry(emit->as, num_locals);
-#endif
+    ASM_ENTRY(emit->as, num_locals);
 
     // initialise locals from parameters
 #if N_X64
@@ -326,6 +521,21 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
             assert(0);
         }
     }
+#elif N_X86
+    for (int i = 0; i < scope->num_pos_args; i++) {
+        // TODO
+        assert(0);
+        if (i == 0) {
+            asm_x86_mov_r32_to_r32(emit->as, REG_ARG_1, REG_LOCAL_1);
+        } else if (i == 1) {
+            asm_x86_mov_r32_to_local(emit->as, REG_ARG_2, i - REG_LOCAL_NUM);
+        } else if (i == 2) {
+            asm_x86_mov_r32_to_local(emit->as, REG_ARG_3, i - REG_LOCAL_NUM);
+        } else {
+            // TODO not implemented
+            assert(0);
+        }
+    }
 #elif N_THUMB
     for (int i = 0; i < scope->num_pos_args; i++) {
         if (i == 0) {
@@ -360,26 +570,16 @@ STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scop
     }
 
     asm_arm_mov_reg_i32(emit->as, REG_R7, (mp_uint_t)mp_fun_table);
+#else
+    #error not implemented
 #endif
 }
 
 STATIC void emit_native_end_pass(emit_t *emit) {
-#if N_X64
-    if (!emit->last_emit_was_return_value) {
-        asm_x64_exit(emit->as);
-    }
-    asm_x64_end_pass(emit->as);
-#elif N_THUMB
-    if (!emit->last_emit_was_return_value) {
-        asm_thumb_exit(emit->as);
-    }
-    asm_thumb_end_pass(emit->as);
-#elif N_ARM
     if (!emit->last_emit_was_return_value) {
-        asm_arm_exit(emit->as);
+        ASM_EXIT(emit->as);
     }
-    asm_arm_end_pass(emit->as);    
-#endif
+    ASM_END_PASS(emit->as);
 
     // check stack is back to zero size
     if (emit->stack_size != 0) {
@@ -387,16 +587,8 @@ STATIC void emit_native_end_pass(emit_t *emit) {
     }
 
     if (emit->pass == MP_PASS_EMIT) {
-#if N_X64
-        void *f = asm_x64_get_code(emit->as);
-        mp_uint_t f_len = asm_x64_get_code_size(emit->as);
-#elif N_THUMB
-        void *f = asm_thumb_get_code(emit->as);
-        mp_uint_t f_len = asm_thumb_get_code_size(emit->as);
-#elif N_ARM
-        void *f = asm_arm_get_code(emit->as);
-        mp_uint_t f_len = asm_arm_get_code_size(emit->as);
-#endif
+        void *f = ASM_GET_CODE(emit->as);
+        mp_uint_t f_len = ASM_GET_CODE_SIZE(emit->as);
 
         // compute type signature
         // TODO check that viper types here convert correctly to valid types for emit glue
@@ -450,7 +642,7 @@ STATIC void emit_native_pre(emit_t *emit) {
                 case STACK_REG:
                     // TODO only push reg if in regs_needed
                     emit->stack_info[i].kind = STACK_VALUE;
-                    ASM_MOV_REG_TO_LOCAL(emit->stack_info[i].u_reg, emit->stack_start + i);
+                    ASM_MOV_REG_TO_LOCAL(emit->as, emit->stack_info[i].u_reg, emit->stack_start + i);
                     break;
 
                 case STACK_IMM:
@@ -476,7 +668,7 @@ STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
             stack_info_t *si = &emit->stack_info[i];
             if (si->kind == STACK_REG && si->u_reg == reg_needed) {
                 si->kind = STACK_VALUE;
-                ASM_MOV_REG_TO_LOCAL(si->u_reg, emit->stack_start + i);
+                ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
             }
         }
     }
@@ -487,7 +679,7 @@ STATIC void need_reg_all(emit_t *emit) {
         stack_info_t *si = &emit->stack_info[i];
         if (si->kind == STACK_REG) {
             si->kind = STACK_VALUE;
-            ASM_MOV_REG_TO_LOCAL(si->u_reg, emit->stack_start + i);
+            ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
         }
     }
 }
@@ -497,14 +689,14 @@ STATIC void need_stack_settled(emit_t *emit) {
         stack_info_t *si = &emit->stack_info[i];
         if (si->kind == STACK_REG) {
             si->kind = STACK_VALUE;
-            ASM_MOV_REG_TO_LOCAL(si->u_reg, emit->stack_start + i);
+            ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
         }
     }
     for (int i = 0; i < emit->stack_size; i++) {
         stack_info_t *si = &emit->stack_info[i];
         if (si->kind == STACK_IMM) {
             si->kind = STACK_VALUE;
-            ASM_MOV_IMM_TO_LOCAL_USING(si->u_imm, emit->stack_start + i, REG_TEMP0);
+            ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->u_imm, emit->stack_start + i, REG_TEMP0);
         }
     }
 }
@@ -516,17 +708,17 @@ STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int re
     *vtype = si->vtype;
     switch (si->kind) {
         case STACK_VALUE:
-            ASM_MOV_LOCAL_TO_REG(emit->stack_start + emit->stack_size - pos, reg_dest);
+            ASM_MOV_LOCAL_TO_REG(emit->as, emit->stack_start + emit->stack_size - pos, reg_dest);
             break;
 
         case STACK_REG:
             if (si->u_reg != reg_dest) {
-                ASM_MOV_REG_TO_REG(si->u_reg, reg_dest);
+                ASM_MOV_REG_TO_REG(emit->as, si->u_reg, reg_dest);
             }
             break;
 
         case STACK_IMM:
-            ASM_MOV_IMM_TO_REG(si->u_imm, reg_dest);
+            ASM_MOV_IMM_TO_REG(emit->as, si->u_imm, reg_dest);
             break;
     }
 }
@@ -592,66 +784,36 @@ STATIC void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, in
 
 STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) {
     need_reg_all(emit);
-#if N_X64
-    asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
-#elif N_THUMB
-    asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#elif N_ARM
-    asm_arm_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#endif
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
 }
 
 STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
     need_reg_all(emit);
-    ASM_MOV_IMM_TO_REG(arg_val, arg_reg);
-#if N_X64
-    asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
-#elif N_THUMB
-    asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#elif N_ARM
-    asm_arm_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#endif
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val, arg_reg);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
 }
 
 // the first arg is stored in the code aligned on a mp_uint_t boundary
 STATIC void emit_call_with_imm_arg_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
     need_reg_all(emit);
-    ASM_MOV_ALIGNED_IMM_TO_REG(arg_val, arg_reg);
-#if N_X64
-    asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
-#elif N_THUMB
-    asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#elif N_ARM
-    asm_arm_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#endif
+    ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, arg_val, arg_reg);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
 }
 
 STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
     need_reg_all(emit);
-    ASM_MOV_IMM_TO_REG(arg_val1, arg_reg1);
-    ASM_MOV_IMM_TO_REG(arg_val2, arg_reg2);
-#if N_X64
-    asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
-#elif N_THUMB
-    asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#elif N_ARM
-    asm_arm_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#endif
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val1, arg_reg1);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val2, arg_reg2);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
 }
 
 // the first arg is stored in the code aligned on a mp_uint_t boundary
 STATIC void emit_call_with_3_imm_args_and_first_aligned(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2, mp_int_t arg_val3, int arg_reg3) {
     need_reg_all(emit);
-    ASM_MOV_ALIGNED_IMM_TO_REG(arg_val1, arg_reg1);
-    ASM_MOV_IMM_TO_REG(arg_val2, arg_reg2);
-    ASM_MOV_IMM_TO_REG(arg_val3, arg_reg3);
-#if N_X64
-    asm_x64_call_ind(emit->as, mp_fun_table[fun_kind], REG_RAX);
-#elif N_THUMB
-    asm_thumb_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#elif N_ARM
-    asm_arm_bl_ind(emit->as, mp_fun_table[fun_kind], fun_kind, REG_R3);
-#endif
+    ASM_MOV_ALIGNED_IMM_TO_REG(emit->as, arg_val1, arg_reg1);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val2, arg_reg2);
+    ASM_MOV_IMM_TO_REG(emit->as, arg_val3, arg_reg3);
+    ASM_CALL_IND(emit->as, mp_fun_table[fun_kind], fun_kind);
 }
 
 // vtype of all n_pop objects is VTYPE_PYOBJ
@@ -670,19 +832,19 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de
             si->kind = STACK_VALUE;
             switch (si->vtype) {
                 case VTYPE_PYOBJ:
-                    ASM_MOV_IMM_TO_LOCAL_USING(si->u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
                     break;
                 case VTYPE_BOOL:
                     if (si->u_imm == 0) {
-                        ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
                     } else {
-                        ASM_MOV_IMM_TO_LOCAL_USING((mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                        ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
                     }
                     si->vtype = VTYPE_PYOBJ;
                     break;
                 case VTYPE_INT:
                 case VTYPE_UINT:
-                    ASM_MOV_IMM_TO_LOCAL_USING((si->u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+                    ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (si->u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
                     si->vtype = VTYPE_PYOBJ;
                     break;
                 default:
@@ -700,16 +862,16 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de
         stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
         if (si->vtype != VTYPE_PYOBJ) {
             mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i;
-            ASM_MOV_LOCAL_TO_REG(local_num, REG_ARG_1);
+            ASM_MOV_LOCAL_TO_REG(emit->as, local_num, REG_ARG_1);
             emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type
-            ASM_MOV_REG_TO_LOCAL(REG_RET, local_num);
+            ASM_MOV_REG_TO_LOCAL(emit->as, REG_RET, local_num);
             si->vtype = VTYPE_PYOBJ;
         }
     }
 
     // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
     adjust_stack(emit, -n_pop);
-    ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
+    ASM_MOV_LOCAL_ADDR_TO_REG(emit->as, emit->stack_start + emit->stack_size, reg_dest);
 }
 
 // vtype of all n_push objects is VTYPE_PYOBJ
@@ -719,7 +881,7 @@ STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_d
         emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
         emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
     }
-    ASM_MOV_LOCAL_ADDR_TO_REG(emit->stack_start + emit->stack_size, reg_dest);
+    ASM_MOV_LOCAL_ADDR_TO_REG(emit->as, emit->stack_start + emit->stack_size, reg_dest);
     adjust_stack(emit, n_push);
 }
 
@@ -739,13 +901,7 @@ STATIC void emit_native_label_assign(emit_t *emit, uint l) {
     emit_native_pre(emit);
     // need to commit stack because we can jump here from elsewhere
     need_stack_settled(emit);
-#if N_X64
-    asm_x64_label_assign(emit->as, l);
-#elif N_THUMB
-    asm_thumb_label_assign(emit->as, l);
-#elif N_ARM
-    asm_arm_label_assign(emit->as, l);
-#endif
+    ASM_LABEL_ASSIGN(emit->as, l);
     emit_post(emit);
 }
 
@@ -864,6 +1020,14 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qstr, uint id_flags, int lo
         asm_x64_mov_local_to_r64(emit->as, local_num - REG_LOCAL_NUM, REG_RAX);
         emit_post_push_reg(emit, vtype, REG_RAX);
     }
+#elif N_X86
+    if (local_num == 0) {
+        emit_post_push_reg(emit, vtype, REG_LOCAL_1);
+    } else {
+        need_reg_single(emit, REG_EAX, 0);
+        asm_x86_mov_local_to_r32(emit->as, local_num - REG_LOCAL_NUM, REG_EAX);
+        emit_post_push_reg(emit, vtype, REG_EAX);
+    }
 #elif N_THUMB
     if (local_num == 0) {
         emit_post_push_reg(emit, vtype, REG_LOCAL_1);
@@ -888,6 +1052,8 @@ STATIC void emit_native_load_fast(emit_t *emit, qstr qstr, uint id_flags, int lo
         asm_arm_mov_reg_local(emit->as, REG_R0, local_num - REG_LOCAL_NUM);
         emit_post_push_reg(emit, vtype, REG_R0);
     }
+#else
+    #error not implemented
 #endif
 }
 
@@ -955,6 +1121,13 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qstr, int local_num) {
         emit_pre_pop_reg(emit, &vtype, REG_RAX);
         asm_x64_mov_r64_to_local(emit->as, REG_RAX, local_num - REG_LOCAL_NUM);
     }
+#elif N_X86
+    if (local_num == 0) {
+        emit_pre_pop_reg(emit, &vtype, REG_LOCAL_1);
+    } else {
+        emit_pre_pop_reg(emit, &vtype, REG_EAX);
+        asm_x86_mov_r32_to_local(emit->as, REG_EAX, local_num - REG_LOCAL_NUM);
+    }
 #elif N_THUMB
     if (local_num == 0) {
         emit_pre_pop_reg(emit, &vtype, REG_LOCAL_1);
@@ -977,6 +1150,8 @@ STATIC void emit_native_store_fast(emit_t *emit, qstr qstr, int local_num) {
         emit_pre_pop_reg(emit, &vtype, REG_R0);
         asm_arm_mov_local_reg(emit->as, local_num - REG_LOCAL_NUM, REG_R0);
     }
+#else
+    #error not implemented
 #endif
 
     emit_post(emit);
@@ -1012,7 +1187,7 @@ STATIC void emit_native_store_global(emit_t *emit, qstr qstr) {
     } else {
         emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
         emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type
-        ASM_MOV_REG_TO_REG(REG_RET, REG_ARG_2);
+        ASM_MOV_REG_TO_REG(emit->as, REG_RET, REG_ARG_2);
     }
     emit_call_with_imm_arg(emit, MP_F_STORE_GLOBAL, qstr, REG_ARG_1); // arg1 = name
     emit_post(emit);
@@ -1110,13 +1285,7 @@ STATIC void emit_native_jump(emit_t *emit, uint label) {
     emit_native_pre(emit);
     // need to commit stack because we are jumping elsewhere
     need_stack_settled(emit);
-#if N_X64
-    asm_x64_jmp_label(emit->as, label);
-#elif N_THUMB
-    asm_thumb_b_label(emit->as, label);
-#elif N_ARM
-    asm_arm_b_label(emit->as, label);
-#endif
+    ASM_JUMP(emit->as, label);
     emit_post(emit);
 }
 
@@ -1143,62 +1312,26 @@ STATIC void emit_native_jump_helper(emit_t *emit, uint label, bool pop) {
 
 STATIC void emit_native_pop_jump_if_true(emit_t *emit, uint label) {
     emit_native_jump_helper(emit, label, true);
-#if N_X64
-    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
-    asm_x64_jcc_label(emit->as, JCC_JNZ, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_NE, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_NE, label);
-#endif
+    ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
     emit_post(emit);
 }
 
 STATIC void emit_native_pop_jump_if_false(emit_t *emit, uint label) {
     emit_native_jump_helper(emit, label, true);
-#if N_X64
-    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
-    asm_x64_jcc_label(emit->as, JCC_JZ, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_EQ, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_EQ, label);
-#endif
+    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label);
     emit_post(emit);
 }
 
 STATIC void emit_native_jump_if_true_or_pop(emit_t *emit, uint label) {
     emit_native_jump_helper(emit, label, false);
-#if N_X64
-    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
-    asm_x64_jcc_label(emit->as, JCC_JNZ, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_NE, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_NE, label);
-#endif
+    ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
     adjust_stack(emit, -1);
     emit_post(emit);
 }
 
 STATIC void emit_native_jump_if_false_or_pop(emit_t *emit, uint label) {
     emit_native_jump_helper(emit, label, false);
-#if N_X64
-    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
-    asm_x64_jcc_label(emit->as, JCC_JZ, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_EQ, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_EQ, label);
-#endif
+    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label);
     adjust_stack(emit, -1);
     emit_post(emit);
 }
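
All four jump helpers above now share two composite macros. For x86 these presumably expand to the same test-then-conditional-branch pair that the deleted x64 code spelled out inline; a self-contained sketch of one plausible expansion (ASM_X86_CC_JNZ and the stub bodies are assumptions, modelled on the renamed x64 condition codes):

    #include <stdio.h>

    #define ASM_X86_CC_JNZ (0x5)  // assumed to mirror ASM_X64_CC_JNZ

    typedef struct _asm_stub_t { int dummy; } asm_stub_t;

    static void asm_x86_test_r8_with_r8(asm_stub_t *as, int r1, int r2) {
        (void)as;
        printf("test r%d, r%d\n", r1, r2);
    }

    static void asm_x86_jcc_label(asm_stub_t *as, int cc, unsigned int label) {
        (void)as;
        printf("jcc cc=%d -> label %u\n", cc, label);
    }

    // do { ... } while (0) keeps the two-instruction sequence safe in bare if/else
    #define ASM_JUMP_IF_REG_NONZERO(as, reg, label) \
        do { \
            asm_x86_test_r8_with_r8((as), (reg), (reg)); \
            asm_x86_jcc_label((as), ASM_X86_CC_JNZ, (label)); \
        } while (0)

    int main(void) {
        asm_stub_t as = { 0 };
        ASM_JUMP_IF_REG_NONZERO(&as, 0 /* EAX */, 1);
        return 0;
    }

ASM_JUMP_IF_REG_ZERO would be identical apart from the JZ condition code.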
@@ -1226,16 +1359,7 @@ STATIC void emit_native_setup_except(emit_t *emit, uint label) {
     need_stack_settled(emit);
     emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_1, sizeof(nlr_buf_t) / sizeof(mp_uint_t)); // arg1 = pointer to nlr buf
     emit_call(emit, MP_F_NLR_PUSH);
-#if N_X64
-    asm_x64_test_r8_with_r8(emit->as, REG_RET, REG_RET);
-    asm_x64_jcc_label(emit->as, JCC_JNZ, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_i8(emit->as, REG_RET, 0);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_NE, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_NE, label);
-#endif
+    ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label);
     emit_post(emit);
 }
 
@@ -1265,17 +1389,8 @@ STATIC void emit_native_for_iter(emit_t *emit, uint label) {
     emit_access_stack(emit, 1, &vtype, REG_ARG_1);
     assert(vtype == VTYPE_PYOBJ);
     emit_call(emit, MP_F_ITERNEXT);
-    ASM_MOV_IMM_TO_REG((mp_uint_t)MP_OBJ_STOP_ITERATION, REG_TEMP1);
-#if N_X64
-    asm_x64_cmp_r64_with_r64(emit->as, REG_RET, REG_TEMP1);
-    asm_x64_jcc_label(emit->as, JCC_JE, label);
-#elif N_THUMB
-    asm_thumb_cmp_rlo_rlo(emit->as, REG_RET, REG_TEMP1);
-    asm_thumb_bcc_label(emit->as, THUMB_CC_EQ, label);
-#elif N_ARM
-    asm_arm_cmp_reg_i8(emit->as, REG_RET, 0);
-    asm_arm_bcc_label(emit->as, ARM_CC_EQ, label);
-#endif
+    ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)MP_OBJ_STOP_ITERATION, REG_TEMP1);
+    ASM_JUMP_IF_REG_EQ(emit->as, REG_RET, REG_TEMP1, label);
     emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
 }
 
@@ -1322,17 +1437,25 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
         if (op == MP_BINARY_OP_ADD || op == MP_BINARY_OP_INPLACE_ADD) {
 #if N_X64
             asm_x64_add_r64_to_r64(emit->as, REG_ARG_3, REG_ARG_2);
+#elif N_X86
+            asm_x86_add_r32_to_r32(emit->as, REG_ARG_3, REG_ARG_2);
 #elif N_THUMB
             asm_thumb_add_rlo_rlo_rlo(emit->as, REG_ARG_2, REG_ARG_2, REG_ARG_3);
 #elif N_ARM
             asm_arm_add_reg(emit->as, REG_ARG_2, REG_ARG_2, REG_ARG_3);
+#else
+    #error not implemented
 #endif
             emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
         } else if (op == MP_BINARY_OP_LESS) {
 #if N_X64
             asm_x64_xor_r64_to_r64(emit->as, REG_RET, REG_RET);
             asm_x64_cmp_r64_with_r64(emit->as, REG_ARG_3, REG_ARG_2);
-            asm_x64_setcc_r8(emit->as, JCC_JL, REG_RET);
+            asm_x64_setcc_r8(emit->as, ASM_X64_CC_JL, REG_RET);
+#elif N_X86
+            asm_x86_xor_r32_to_r32(emit->as, REG_RET, REG_RET);
+            asm_x86_cmp_r32_with_r32(emit->as, REG_ARG_3, REG_ARG_2);
+            asm_x86_setcc_r8(emit->as, ASM_X86_CC_JL, REG_RET);
 #elif N_THUMB
             asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, REG_ARG_3);
             asm_thumb_ite_ge(emit->as);
@@ -1340,6 +1463,8 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
             asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1); // if r0 < r1
 #elif N_ARM
             asm_arm_less_op(emit->as, REG_ARG_2, REG_ARG_3);
+#else
+    #error not implemented
 #endif
             emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
         } else {
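
The MP_BINARY_OP_LESS sequence above, for both x64 and the new x86 path, uses a branch-free idiom worth spelling out: setcc writes only the low byte of its register, so the full register is zeroed with xor first, and the xor must come before cmp because xor itself clobbers the flags that setcc reads. A C model of what the emitted code computes:

    #include <stdio.h>
    #include <stdint.h>

    // models: xor ret,ret; cmp lhs,rhs; setl ret8  (a branch-free 0/1 boolean)
    static uint32_t less_model(int32_t lhs, int32_t rhs) {
        uint32_t ret = 0;    // xor: clear all 32 bits up front
        if (lhs < rhs) {     // cmp: sets the flags that setl inspects
            ret |= 1;        // setl: writes only the low 8 bits
        }
        return ret;          // upper bits are guaranteed zero either way
    }

    int main(void) {
        printf("%u %u\n", less_model(1, 2), less_model(2, 1));  // prints: 1 0
        return 0;
    }
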
@@ -1558,7 +1683,7 @@ STATIC void emit_native_return_value(emit_t *emit) {
     if (emit->do_viper_types) {
         if (vtype == VTYPE_PTR_NONE) {
             if (emit->return_vtype == VTYPE_PYOBJ) {
-                ASM_MOV_IMM_TO_REG((mp_uint_t)mp_const_none, REG_RET);
+                ASM_MOV_IMM_TO_REG(emit->as, (mp_uint_t)mp_const_none, REG_RET);
             }
         } else if (vtype != emit->return_vtype) {
             printf("ViperTypeError: incompatible return type\n");
@@ -1567,16 +1692,8 @@ STATIC void emit_native_return_value(emit_t *emit) {
         assert(vtype == VTYPE_PYOBJ);
     }
     emit->last_emit_was_return_value = true;
-#if N_X64
-    //asm_x64_call_ind(emit->as, 0, REG_RAX); to seg fault for debugging with gdb
-    asm_x64_exit(emit->as);
-#elif N_THUMB
-    //asm_thumb_call_ind(emit->as, 0, REG_R0); to seg fault for debugging with gdb
-    asm_thumb_exit(emit->as);
-#elif N_ARM
-    //asm_arm_bkpt(emit->as); to insert a bkpt and not segfault for debugging
-    asm_arm_exit(emit->as);
-#endif
+    //ASM_BREAK_POINT(emit->as); // to insert a break-point for debugging
+    ASM_EXIT(emit->as);
 }
 
 STATIC void emit_native_raise_varargs(emit_t *emit, int n_args) {
@@ -1704,4 +1821,4 @@ const emit_method_table_t EXPORT_FUN(method_table) = {
     emit_native_end_except_handler,
 };
 
-#endif // (MICROPY_EMIT_X64 && N_X64) || (MICROPY_EMIT_THUMB && N_THUMB) || (MICROPY_EMIT_ARM && N_ARM)
+#endif
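
With every architecture branch now behind a macro, the closing #endif no longer needs a comment enumerating each (MICROPY_EMIT_xxx && N_xxx) pair, hence the simplification above. The same per-variant selection gives each build of this file its own exported method table via EXPORT_FUN (the macro name appears in the hunk header above; the per-architecture expansions shown here are assumptions):

    #include <stdio.h>

    #define N_X86 1  // as set when building emitnx86.o

    #if N_X64
    #define EXPORT_FUN(name) emit_native_x64_##name
    #elif N_X86
    #define EXPORT_FUN(name) emit_native_x86_##name
    #endif

    // stands in for: const emit_method_table_t EXPORT_FUN(method_table) = {...};
    int EXPORT_FUN(method_table) = 42;

    int main(void) {
        // after token pasting, the name is ordinary C
        printf("%d\n", emit_native_x86_method_table);
        return 0;
    }
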
diff --git a/py/mpconfig.h b/py/mpconfig.h
index 633a4a52d..adbcb0eb7 100644
--- a/py/mpconfig.h
+++ b/py/mpconfig.h
@@ -101,6 +101,11 @@
 #define MICROPY_EMIT_X64 (0)
 #endif
 
+// Whether to emit x86 native code
+#ifndef MICROPY_EMIT_X86
+#define MICROPY_EMIT_X86 (0)
+#endif
+
 // Whether to emit thumb native code
 #ifndef MICROPY_EMIT_THUMB
 #define MICROPY_EMIT_THUMB (0)
@@ -117,7 +122,7 @@
 #endif
 
 // Convenience definition for whether any native emitter is enabled
-#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM)
+#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM)
 
 /*****************************************************************************/
 /* Compiler configuration                                                    */
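
Folding the per-emitter flags into MICROPY_EMIT_NATIVE means consumers no longer repeat a growing OR expression; the qstrdefs.h hunk below is the first to benefit. A small demo of the pattern, with the flag values hard-coded for illustration:

    #include <stdio.h>

    #define MICROPY_EMIT_X64   (0)
    #define MICROPY_EMIT_X86   (1)
    #define MICROPY_EMIT_THUMB (0)
    #define MICROPY_EMIT_ARM   (0)
    #define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM)

    int main(void) {
    #if MICROPY_EMIT_NATIVE
        puts("at least one native emitter is enabled");
    #else
        puts("bytecode only");
    #endif
        return 0;
    }
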
diff --git a/py/py.mk b/py/py.mk
index 0720e434b..e2288d382 100644
--- a/py/py.mk
+++ b/py/py.mk
@@ -35,6 +35,8 @@ PY_O_BASENAME = \
 	emitbc.o \
 	asmx64.o \
 	emitnx64.o \
+	asmx86.o \
+	emitnx86.o \
 	asmthumb.o \
 	emitnthumb.o \
 	emitinlinethumb.o \
@@ -145,6 +147,10 @@ $(PY_BUILD)/emitnx64.o: CFLAGS += -DN_X64
 $(PY_BUILD)/emitnx64.o: py/emitnative.c
 	$(call compile_c)
 
+$(PY_BUILD)/emitnx86.o: CFLAGS += -DN_X86
+$(PY_BUILD)/emitnx86.o: py/emitnative.c
+	$(call compile_c)
+
 $(PY_BUILD)/emitnthumb.o: CFLAGS += -DN_THUMB
 $(PY_BUILD)/emitnthumb.o: py/emitnative.c
 	$(call compile_c)
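
These rules compile the single emitnative.c source into one object file per architecture, with only the -DN_xxx flag differing. A minimal translation unit demonstrating the same trick (the file and function names are illustrative):

    /* emit_variant.c */
    #if defined(N_X64)
    const char *emitter_arch(void) { return "x64"; }
    #elif defined(N_X86)
    const char *emitter_arch(void) { return "x86"; }
    #else
    #error "build with -DN_X64 or -DN_X86"
    #endif

Compiling it twice, e.g. cc -DN_X64 -c emit_variant.c -o emitnx64.o and cc -DN_X86 -c emit_variant.c -o emitnx86.o, produces two independent objects from one source, exactly as the rules above do for emitnative.c.
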
diff --git a/py/qstrdefs.h b/py/qstrdefs.h
index 1acf63994..d41029a1f 100644
--- a/py/qstrdefs.h
+++ b/py/qstrdefs.h
@@ -73,7 +73,7 @@ Q(micropython)
 Q(bytecode)
 Q(const)
 
-#if MICROPY_EMIT_X64 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM
+#if MICROPY_EMIT_NATIVE
 Q(native)
 Q(viper)
 Q(uint)
diff --git a/unix/mpconfigport.h b/unix/mpconfigport.h
index 1a57eb251..38da3fcbc 100644
--- a/unix/mpconfigport.h
+++ b/unix/mpconfigport.h
@@ -30,6 +30,9 @@
 #if !defined(MICROPY_EMIT_X64) && defined(__x86_64__)
     #define MICROPY_EMIT_X64        (1)
 #endif
+#if !defined(MICROPY_EMIT_X86) && defined(__i386__)
+    #define MICROPY_EMIT_X86        (1)
+#endif
 #define MICROPY_EMIT_THUMB          (0)
 #define MICROPY_EMIT_INLINE_THUMB   (0)
 #define MICROPY_ENABLE_GC           (1)
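
The unix port enables the matching emitter automatically from the compiler's predefined architecture macros, so a 32-bit build picks up MICROPY_EMIT_X86 with no extra configuration. A quick check of which branch applies on a given host:

    #include <stdio.h>

    int main(void) {
    #if defined(__x86_64__)
        puts("__x86_64__ defined: MICROPY_EMIT_X64 defaults on");
    #elif defined(__i386__)
        puts("__i386__ defined: MICROPY_EMIT_X86 defaults on");
    #else
        puts("neither x86 emitter applies on this host");
    #endif
        return 0;
    }

With gcc, building with -m32 defines __i386__ even on an x86-64 host, which is a convenient way to exercise the new x86 emitter.
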
-- 
GitLab