From 1ef2348df0c15f9924d3b5be798fd20805ccd5aa Mon Sep 17 00:00:00 2001
From: Damien George <damien.p.george@gmail.com>
Date: Sun, 12 Oct 2014 14:21:06 +0100
Subject: [PATCH] py: Implement and, or and xor native ops for viper.
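
Add support for the binary ops &, | and ^ (and their in-place variants)
in the viper native code emitter, with backend implementations for x64,
x86, Thumb and ARM.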

---
 py/asmarm.c                                | 30 ++++++++++++++++++++++
 py/asmarm.h                                |  3 +++
 py/asmx64.c                                | 10 ++++++++
 py/asmx64.h                                |  2 ++
 py/asmx86.c                                | 10 ++++++++
 py/asmx86.h                                |  2 ++
 py/emitnative.c                            | 21 +++++++++++++++
 tests/micropython/viper_binop_arith.py     | 22 ++++++++++++++++
 tests/micropython/viper_binop_arith.py.exp | 10 ++++++++
 9 files changed, 110 insertions(+)

diff --git a/py/asmarm.c b/py/asmarm.c
index 3fac94285..8d328f2ca 100644
--- a/py/asmarm.c
+++ b/py/asmarm.c
@@ -175,6 +175,21 @@ STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
     return 0x0400000 | (rn << 16) | (rd << 12) | rm;
 }
 
+STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
+}
+
+STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
+}
+
 void asm_arm_bkpt(asm_arm_t *as) {
     // bkpt #0
     emit_al(as, 0x1200070); 
@@ -312,6 +327,21 @@ void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
     emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
 }
 
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // and rd, rn, rm
+    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
+}
+
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // eor rd, rn, rm
+    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
+}
+
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
+    // orr rd, rn, rm
+    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
+}
+
 void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
     // add rd, sp, #local_num*4
     emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
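
The three new encoders follow the standard ARM data-processing layout,
cond[31:28] | opcode[24:21] | Rn[19:16] | Rd[15:12] | Rm[3:0], with
AND=0b0000, EOR=0b0001 and ORR=0b1100 giving the 0x0000000, 0x0200000
and 0x1800000 constants above. A standalone cross-check of those
constants (a sketch; the 0xE0000000 AL condition value is assumed from
the emit_al naming):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    // ARM data-processing word: cond | opcode<<21 | Rn<<16 | Rd<<12 | Rm.
    static uint32_t arm_dp(uint32_t opcode, uint32_t rd, uint32_t rn, uint32_t rm) {
        return (0xEu << 28) | (opcode << 21) | (rn << 16) | (rd << 12) | rm;
    }

    int main(void) {
        assert(arm_dp(0x0, 0, 1, 2) == 0xE0010002u); // and r0, r1, r2
        assert(arm_dp(0x1, 0, 1, 2) == 0xE0210002u); // eor r0, r1, r2
        assert(arm_dp(0xC, 0, 1, 2) == 0xE1810002u); // orr r0, r1, r2
        printf("ARM encodings ok\n");
        return 0;
    }
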
diff --git a/py/asmarm.h b/py/asmarm.h
index 3829938ae..d7180beda 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -96,6 +96,9 @@ void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn);
 // arithmetic
 void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
 void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
 void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
diff --git a/py/asmx64.c b/py/asmx64.c
index bca825b3c..6f352b117 100644
--- a/py/asmx64.c
+++ b/py/asmx64.c
@@ -53,6 +53,8 @@
 #define OPCODE_MOV_R64_TO_RM64   (0x89) /* /r */
 #define OPCODE_MOV_RM64_TO_R64   (0x8b)
 #define OPCODE_LEA_MEM_TO_R64    (0x8d) /* /r */
+#define OPCODE_AND_R64_TO_RM64   (0x21) /* /r */
+#define OPCODE_OR_R64_TO_RM64    (0x09) /* /r */
 #define OPCODE_XOR_R64_TO_RM64   (0x31) /* /r */
 #define OPCODE_ADD_R64_TO_RM64   (0x01) /* /r */
 #define OPCODE_ADD_I32_TO_RM32   (0x81) /* /0 */
@@ -385,6 +387,14 @@ void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64
     asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
 }
 
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
+}
+
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_OR_R64_TO_RM64);
+}
+
 void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
 }
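
These follow the usual "op r/m64, r64" instruction form; from the /r
comments, asm_x64_generic_r64_r64 presumably emits a REX.W prefix, the
opcode, and a mod=11 ModRM byte. A sketch under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    // "op r/m64, r64" with register operands: REX.W + opcode + ModRM.
    static void encode_r64_r64(uint8_t opcode, int dest, int src, uint8_t out[3]) {
        out[0] = 0x48 | (((src >> 3) & 1) << 2) | ((dest >> 3) & 1); // REX.W|R|B
        out[1] = opcode;                                  // 0x21, 0x09 or 0x31
        out[2] = 0xC0 | ((src & 7) << 3) | (dest & 7);    // ModRM, mod=11
    }

    int main(void) {
        uint8_t b[3];
        encode_r64_r64(0x21, 0 /*rax*/, 1 /*rcx*/, b);    // and rax, rcx
        printf("%02x %02x %02x\n", b[0], b[1], b[2]);     // 48 21 c8
        return 0;
    }

The x86 encoders further down reuse the same 0x21/0x09/0x31 opcodes;
dropping the REX.W prefix gives the 32-bit forms.
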
diff --git a/py/asmx64.h b/py/asmx64.h
index 0d3f58ecd..8dce48c5c 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -86,6 +86,8 @@ void asm_x64_mov_i64_to_r64_aligned(asm_x64_t *as, int64_t src_i64, int dest_r64
 void asm_x64_mov_r8_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
 void asm_x64_mov_r16_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
 void asm_x64_mov_r64_to_disp(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_shl_r64_cl(asm_x64_t* as, int dest_r64);
 void asm_x64_sar_r64_cl(asm_x64_t* as, int dest_r64);
diff --git a/py/asmx86.c b/py/asmx86.c
index 072998c67..30bd7933b 100644
--- a/py/asmx86.c
+++ b/py/asmx86.c
@@ -53,6 +53,8 @@
 #define OPCODE_MOV_R32_TO_RM32   (0x89)
 #define OPCODE_MOV_RM32_TO_R32   (0x8b)
 #define OPCODE_LEA_MEM_TO_R32    (0x8d) /* /r */
+#define OPCODE_AND_R32_TO_RM32   (0x21) /* /r */
+#define OPCODE_OR_R32_TO_RM32    (0x09) /* /r */
 #define OPCODE_XOR_R32_TO_RM32   (0x31) /* /r */
 #define OPCODE_ADD_R32_TO_RM32   (0x01)
 #define OPCODE_ADD_I32_TO_RM32   (0x81) /* /0 */
@@ -287,6 +289,14 @@ void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32
     asm_x86_mov_i32_to_r32(as, src_i32, dest_r32);
 }
 
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
+}
+
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_OR_R32_TO_RM32);
+}
+
 void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
     asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
 }
diff --git a/py/asmx86.h b/py/asmx86.h
index 2d83f3a65..380d1cbb2 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -83,6 +83,8 @@ void asm_x86_mov_i32_to_r32_aligned(asm_x86_t *as, int32_t src_i32, int dest_r32
 void asm_x86_mov_r8_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
 void asm_x86_mov_r16_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
 void asm_x86_mov_r32_to_disp(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_shl_r32_cl(asm_x86_t* as, int dest_r32);
 void asm_x86_sar_r32_cl(asm_x86_t* as, int dest_r32);
diff --git a/py/emitnative.c b/py/emitnative.c
index f435117c4..4e39f641d 100644
--- a/py/emitnative.c
+++ b/py/emitnative.c
@@ -145,6 +145,9 @@
 
 #define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
 #define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
 
@@ -270,6 +273,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
 #define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
 
@@ -346,6 +352,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
 #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
 
@@ -422,6 +431,9 @@ STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
 
 #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
 #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 
@@ -1769,6 +1781,15 @@ STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
             ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
             emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
         #endif
+        } else if (op == MP_BINARY_OP_OR || op == MP_BINARY_OP_INPLACE_OR) {
+            ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_XOR || op == MP_BINARY_OP_INPLACE_XOR) {
+            ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
+        } else if (op == MP_BINARY_OP_AND || op == MP_BINARY_OP_INPLACE_AND) {
+            ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
+            emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
         } else if (op == MP_BINARY_OP_ADD || op == MP_BINARY_OP_INPLACE_ADD) {
             ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
             emit_post_push_reg(emit, VTYPE_INT, REG_ARG_2);
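
The three new branches mirror the existing add/sub cases: by this point
the LHS has presumably been popped into REG_ARG_2 and the RHS into
reg_rhs, the op combines them in place, and the result is pushed back as
VTYPE_INT. The ARM macros pass reg_dest twice because the underlying
instructions are three-operand, adapting them to this two-operand
contract. A toy model of the stack discipline (hypothetical names; the
real emitter works on registers, not C values):

    #include <stdio.h>

    typedef enum { OP_AND, OP_OR, OP_XOR } binop_t;

    static int stack[16];
    static int sp;

    static void push(int v) { stack[sp++] = v; }
    static int pop(void) { return stack[--sp]; }

    // Mirrors emit_native_binary_op: pop rhs and lhs, combine, push result.
    static void binary_op(binop_t op) {
        int rhs = pop();                     // reg_rhs in the patch
        int lhs = pop();                     // REG_ARG_2 in the patch
        switch (op) {
            case OP_AND: lhs &= rhs; break;  // ASM_AND_REG_REG
            case OP_OR:  lhs |= rhs; break;  // ASM_OR_REG_REG
            case OP_XOR: lhs ^= rhs; break;  // ASM_XOR_REG_REG
        }
        push(lhs);                           // emit_post_push_reg
    }

    int main(void) {
        push(-42); push(6); binary_op(OP_AND);
        printf("%d\n", pop());               // 6, as in the new tests
        return 0;
    }
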
diff --git a/tests/micropython/viper_binop_arith.py b/tests/micropython/viper_binop_arith.py
index 137b8c104..d37450315 100644
--- a/tests/micropython/viper_binop_arith.py
+++ b/tests/micropython/viper_binop_arith.py
@@ -34,3 +34,25 @@ shr(1, 0)
 shr(1, 3)
 shr(42, 2)
 shr(-42, 2)
+
+@micropython.viper
+def and_(x:int, y:int):
+    print(x & y, y & x)
+and_(1, 0)
+and_(1, 3)
+and_(0xf0, 0x3f)
+and_(-42, 6)
+
+@micropython.viper
+def or_(x:int, y:int):
+    print(x | y, y | x)
+or_(1, 0)
+or_(1, 2)
+or_(-42, 5)
+
+@micropython.viper
+def xor(x:int, y:int):
+    print(x ^ y, y ^ x)
+xor(1, 0)
+xor(1, 2)
+xor(-42, 5)
diff --git a/tests/micropython/viper_binop_arith.py.exp b/tests/micropython/viper_binop_arith.py.exp
index f10e998cd..15abcc245 100644
--- a/tests/micropython/viper_binop_arith.py.exp
+++ b/tests/micropython/viper_binop_arith.py.exp
@@ -23,3 +23,13 @@
 0
 10
 -11
+0 0
+1 1
+48 48
+6 6
+1 1
+3 3
+-41 -41
+1 1
+3 3
+-45 -45
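
The final lines of each group exercise signed operands: in two's
complement -42 is ...11010110, so -42 & 6 == 6, -42 | 5 == -41 and
-42 ^ 5 == -45. A quick C cross-check of those expected values:

    #include <stdio.h>

    int main(void) {
        printf("%d %d\n", -42 & 6, 6 & -42);  // 6 6
        printf("%d %d\n", -42 | 5, 5 | -42);  // -41 -41
        printf("%d %d\n", -42 ^ 5, 5 ^ -42);  // -45 -45
        return 0;
    }
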
-- 
GitLab