Compare revisions

Showing with 827 additions and 53 deletions
#include "os/mutex.h"
#include "os/core.h"
#include "epicardium.h"
#include "api/interrupt-sender.h"
#include "user_core/user_core.h"
#include <assert.h>
struct interrupt_priv {
/* Whether this interrupt can be triggered */
bool int_enabled[EPIC_INT_NUM];
/* Whether this interrupt is waiting to be delivered */
bool int_pending[EPIC_INT_NUM];
/* Whether any interrupts are currently waiting to be triggered */
bool has_pending;
};
static struct interrupt_priv interrupt_data;
static struct mutex interrupt_mutex;
static TaskHandle_t interrupts_task;
void interrupt_trigger(api_int_id_t id)
{
assert(id < EPIC_INT_NUM);
mutex_lock(&interrupt_mutex);
if (interrupt_data.int_enabled[id]) {
interrupt_data.int_pending[id] = true;
interrupt_data.has_pending = true;
mutex_unlock(&interrupt_mutex);
xTaskNotifyGive(interrupts_task);
} else {
mutex_unlock(&interrupt_mutex);
}
}
void interrupt_trigger_sync(api_int_id_t id)
{
assert(id < EPIC_INT_NUM);
mutex_lock(&interrupt_mutex);
if (!interrupt_data.int_enabled[id])
goto out;
while (!api_interrupt_is_ready())
;
api_interrupt_trigger(id);
/* Break the dispatcher task out of a potential call
* to epic_sleep() */
xTaskNotifyGive(dispatcher_task_id);
out:
mutex_unlock(&interrupt_mutex);
}
/*
* This function solely exists because of that one use of interrupts that breaks
* the rules: The RTC ALARM interrupt is triggered from a hardware ISR where
* interrupt_trigger_sync() won't work because it needs to lock a mutex.
*
* DO NOT USE THIS FUNCTION IN ANY NEW CODE.
*/
void __attribute__((deprecated)) interrupt_trigger_unsafe(api_int_id_t id)
{
assert(id < EPIC_INT_NUM);
if (!interrupt_data.int_enabled[id])
return;
while (!api_interrupt_is_ready())
;
api_interrupt_trigger(id);
}
static void interrupt_set_enabled(api_int_id_t id, bool enabled)
{
assert(id < EPIC_INT_NUM);
mutex_lock(&interrupt_mutex);
interrupt_data.int_enabled[id] = enabled;
mutex_unlock(&interrupt_mutex);
}
static bool interrupt_get_enabled(api_int_id_t id)
{
assert(id < EPIC_INT_NUM);
bool enabled;
mutex_lock(&interrupt_mutex);
enabled = interrupt_data.int_enabled[id];
mutex_unlock(&interrupt_mutex);
return enabled;
}
void interrupt_init(void)
{
if (interrupt_mutex.name == NULL)
mutex_create(&interrupt_mutex);
api_interrupt_init();
/* Reset all irqs to disabled */
for (size_t i = 0; i < EPIC_INT_NUM; i++) {
interrupt_set_enabled(i, false);
}
/* Reset interrupt is always enabled */
interrupt_set_enabled(EPIC_INT_RESET, true);
}
/* Epic-calls {{{ */
int epic_interrupt_enable(api_int_id_t int_id)
{
if (int_id >= EPIC_INT_NUM) {
return -EINVAL;
}
interrupt_set_enabled(int_id, true);
return 0;
}
int epic_interrupt_disable(api_int_id_t int_id)
{
if (int_id >= EPIC_INT_NUM || int_id == EPIC_INT_RESET) {
return -EINVAL;
}
interrupt_set_enabled(int_id, false);
return 0;
}
int epic_interrupt_is_enabled(api_int_id_t int_id, bool *enabled)
{
if (int_id >= EPIC_INT_NUM) {
return -EINVAL;
}
*enabled = interrupt_get_enabled(int_id);
return 0;
}
/* }}} */
void vInterruptsTask(void *pvParameters)
{
interrupts_task = xTaskGetCurrentTaskHandle();
while (true) {
mutex_lock(&interrupt_mutex);
if (!interrupt_data.has_pending) {
/* Wait for a wakeup event from interrupt_trigger() */
mutex_unlock(&interrupt_mutex);
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
mutex_lock(&interrupt_mutex);
}
while (!api_interrupt_is_ready()) {
mutex_unlock(&interrupt_mutex);
vTaskDelay(pdMS_TO_TICKS(5));
mutex_lock(&interrupt_mutex);
}
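/* EPIC_INT_NUM doubles as a sentinel meaning "no pending interrupt was
 * found" in the scan below. */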
api_int_id_t current_irq = EPIC_INT_NUM;
for (size_t i = 0; i < EPIC_INT_NUM; i++) {
if (interrupt_data.int_pending[i]) {
current_irq = i;
interrupt_data.int_pending[i] = false;
break;
}
}
if (current_irq == EPIC_INT_NUM) {
interrupt_data.has_pending = false;
} else if (interrupt_data.int_enabled[current_irq]) {
api_interrupt_trigger(current_irq);
/* Break the dispatcher task out of a potential call
* to epic_sleep() */
xTaskNotifyGive(dispatcher_task_id);
}
mutex_unlock(&interrupt_mutex);
}
}
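A brief usage sketch (an illustration only, not code from this changeset): core-0 driver code raises an API interrupt towards the payload by calling interrupt_trigger() from task context, after the payload has opted in through the epic_interrupt_enable() call shown above. EPIC_INT_RTC_ALARM is assumed to be one of the interrupt IDs from epicardium.h; only EPIC_INT_RESET and EPIC_INT_NUM appear in this diff.

#include "epicardium.h"
#include "user_core/user_core.h"

/* Hypothetical driver hook, running in a FreeRTOS task (not an ISR). */
static void example_alarm_elapsed(void)
{
	/*
	 * interrupt_trigger() is a no-op while the payload has not enabled
	 * this interrupt (via epic_interrupt_enable()), so it is safe to
	 * call unconditionally.
	 */
	interrupt_trigger(EPIC_INT_RTC_ALARM);
}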
#pragma once
#include "epicardium.h"
/* ---------- Interrupts --------------------------------------------------- */
void interrupt_init(void);
void interrupt_trigger(api_int_id_t id);
void interrupt_trigger_sync(api_int_id_t id);
void interrupt_trigger_unsafe(api_int_id_t id) __attribute__((deprecated(
"interrupt_trigger_unsafe() is racy and only exists for legacy code."
)));
void vInterruptsTask(void *pvParameters);
#include "epicardium.h"
#include "os/core.h"
#include "modules/modules.h"
#include "os/config.h"
#include "os/mutex.h"
#include "user_core/user_core.h"
#include "api/dispatcher.h"
#include "l0der/l0der.h"
#include "card10.h"
#include "FreeRTOS.h"
#include "task.h"
#include <assert.h>
#include <string.h>
#include <stdbool.h>
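/* Flash address of Pycardium's interrupt vector table; core1_load() points
 * core 1 there when a Python payload (script, module or bare interpreter)
 * is started. */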
#define PYCARDIUM_IVT (void *)0x100a0000
#define BLOCK_WAIT pdMS_TO_TICKS(1000)
/*
* Loading an empty filename into Pycardium will drop straight into the
* interpreter. This define is used to make it more clear when we intend
* to go into the interpreter.
*/
#define PYINTERPRETER ""
static TaskHandle_t lifecycle_task = NULL;
static struct mutex core1_mutex = { 0 };
enum payload_type {
PL_INVALID = 0,
PL_PYTHON_SCRIPT = 1,
PL_PYTHON_DIR = 2,
PL_PYTHON_INTERP = 3,
PL_L0DABLE = 4,
};
struct load_info {
bool do_reset;
enum payload_type type;
char name[256];
};
static volatile struct load_info async_load = {
.do_reset = false,
.name = { 0 },
.type = PL_INVALID,
};
/* Whether to write the menu script before attempting to load. */
static volatile bool write_menu = false;
static bool execute_elfs = false;
/* Helpers {{{ */
/*
* Check if the payload is a valid file (or module) and if so, return its type.
*/
static int load_stat(char *name)
{
size_t name_len = strlen(name);
if (name_len == 0) {
return PL_PYTHON_INTERP;
}
struct epic_stat stat;
if (epic_file_stat(name, &stat) < 0) {
return -ENOENT;
}
if (stat.type == EPICSTAT_DIR) {
/* This might be a python module. */
return PL_PYTHON_DIR;
}
if (strcmp(name + name_len - 3, ".py") == 0) {
/* A python script */
return PL_PYTHON_SCRIPT;
} else if (strcmp(name + name_len - 4, ".elf") == 0) {
return PL_L0DABLE;
}
return -ENOEXEC;
}
/*
* Actually load a payload into core 1. Optionally reset the core first.
*/
static int do_load(struct load_info *info)
{
struct l0dable_info l0dable;
int res;
/* Callers of do_load() must first lock the core1_mutex. */
mutex_assert_locked(&core1_mutex);
if (*info->name == '\0') {
LOG_INFO("lifecycle", "Loading Python interpreter ...");
} else {
LOG_INFO("lifecycle", "Loading \"%s\" ...", info->name);
}
if (info->type == PL_L0DABLE && !execute_elfs) {
LOG_WARN(
"lifecycle", "Execution of .elf l0dables is disabled."
);
return -EPERM;
}
/* Signal the dispatcher to return early from applicable API calls. */
xTaskNotifyGive(dispatcher_task_id);
mutex_lock(&api_mutex);
if (info->do_reset) {
LOG_DEBUG("lifecycle", "Triggering core 1 reset.");
core1_trigger_reset();
}
/*
* Wait for the core to become ready to accept a new payload.
*
* If it is not yet ready, hand back control of the API mutex to the
* dispatcher so it can finish dispatching a current API call. This is
* necessary for payloads which have interrupts disabled during an API
* call.
*/
while (!core1_is_ready()) {
/*
* Wake up the dispatcher task prematurely. This is needed so
* the second xTaskNotifyGive() below can then break the dispatcher
* out of e.g. an epic_sleep() call.
*/
xTaskNotifyGive(dispatcher_task_id);
mutex_unlock(&api_mutex);
/* Sleep so the dispatcher task can take the lock. */
vTaskDelay(8);
/* Signal the dispatcher to return early from applicable API calls. */
xTaskNotifyGive(dispatcher_task_id);
mutex_lock(&api_mutex);
}
/*
* Reinitialize Hardware & Drivers
*/
res = hardware_reset();
if (res < 0) {
goto out_free_api;
}
switch (info->type) {
case PL_PYTHON_SCRIPT:
case PL_PYTHON_DIR:
case PL_PYTHON_INTERP:
core1_load(PYCARDIUM_IVT, info->name);
break;
case PL_L0DABLE:
assert(execute_elfs);
res = l0der_load_path(info->name, &l0dable);
if (res != 0) {
LOG_ERR("lifecycle", "l0der failed: %d\n", res);
res = -ENOEXEC;
goto out_free_api;
}
core1_load(l0dable.isr_vector, "");
break;
default:
LOG_ERR("lifecyle",
"Attempted to load invalid payload (%s)",
info->name);
res = -EINVAL;
goto out_free_api;
}
res = 0;
out_free_api:
mutex_unlock(&api_mutex);
return res;
}
/*
* Do a synchronous load.
*/
static int load_sync(char *name, bool reset)
{
/* Callers of load_sync() must first lock the core1_mutex. */
mutex_assert_locked(&core1_mutex);
int ret = load_stat(name);
if (ret < 0) {
return ret;
}
struct load_info info = {
.name = { 0 },
.type = ret,
.do_reset = reset,
};
strncpy(info.name, name, sizeof(info.name));
return do_load(&info);
}
/*
* Do an asynchronous load. This will return immediately if the payload seems
* valid and notify the lifecycle task to actually perform the load later.
*/
static int load_async(char *name, bool reset)
{
/* Callers of load_async() must first lock the core1_mutex. */
mutex_assert_locked(&core1_mutex);
int ret = load_stat(name);
if (ret < 0) {
return ret;
}
async_load.type = ret;
async_load.do_reset = reset;
strncpy((char *)async_load.name, name, sizeof(async_load.name));
if (lifecycle_task != NULL) {
xTaskNotifyGive(lifecycle_task);
}
return 0;
}
/*
* Epicardium contains an embedded default menu script which it writes to
* external flash if none is found there. This way, you won't make your card10
* unusable by accidentally removing the menu script.
*
* You can find the sources for the menu-script in `preload/menu.py`.
*/
/*
* Embed the menu.py script in the Epicardium binary.
*/
__asm(".section \".rodata\"\n"
"_menu_script_start:\n"
".incbin \"../preload/menu.py\"\n"
"_menu_script_end:\n"
".previous\n");
extern const uint8_t _menu_script_start;
extern const uint8_t _menu_script_end;
static int write_default_menu(void)
{
const size_t length =
(uintptr_t)&_menu_script_end - (uintptr_t)&_menu_script_start;
int ret;
LOG_INFO("lifecycle", "Writing default menu ...");
int fd = epic_file_open("menu.py", "w");
if (fd < 0) {
return fd;
}
ret = epic_file_write(fd, &_menu_script_start, length);
if (ret < 0) {
return ret;
}
ret = epic_file_close(fd);
if (ret < 0) {
return ret;
}
return 0;
}
/*
* Go back to the menu.
*/
static void load_menu(bool reset)
{
LOG_DEBUG("lifecycle", "Into the menu");
mutex_lock(&core1_mutex);
int ret = load_async("menu.py", reset);
if (ret < 0) {
LOG_WARN("lifecycle", "No menu script found.");
/* The lifecycle task will perform the write */
write_menu = true;
async_load.type = PL_PYTHON_SCRIPT;
async_load.do_reset = reset;
strncpy((char *)async_load.name,
"menu.py",
sizeof(async_load.name));
if (lifecycle_task != NULL) {
xTaskNotifyGive(lifecycle_task);
}
}
mutex_unlock(&core1_mutex);
}
/* Helpers }}} */
/* API {{{ */
/*
* Restart the firmware
*/
void epic_system_reset(void)
{
card10_reset();
}
/*
* This is NOT the epic_exec() called from Pycardium, but an implementation of
* the same call for use in Epicardium. This function is synchronous and will
* wait until the call returns.
*/
int epic_exec(char *name)
{
mutex_lock(&core1_mutex);
int ret = load_sync(name, true);
mutex_unlock(&core1_mutex);
return ret;
}
/*
* This is the underlying call for epic_exec() from Pycardium. It is
* asynchronous and will return early to allow Pycardium (or a l0dable) to jump
* to the reset handler.
*
* The lifecycle task will deal with actually loading the new payload.
*/
int __epic_exec(char *name)
{
mutex_lock(&core1_mutex);
int ret = load_async(name, false);
mutex_unlock(&core1_mutex);
return ret;
}
/*
* This is the underlying call for epic_exit() from Pycardium. It is
* asynchronous and will return early to allow Pycardium (or a l0dable) to jump
* to the reset handler.
*
* The lifecycle task will deal with actually loading the new payload.
*/
void __epic_exit(int ret)
{
if (ret == 0) {
LOG_INFO("lifecycle", "Payload returned successfully");
} else {
LOG_WARN("lifecycle", "Payload returned with %d.", ret);
}
load_menu(false);
}
/*
* This function can be used in Epicardium to jump back to the menu.
*
* It is asynchronous and will return immediately. The lifecycle task will
* take care of actually jumping back.
*/
void return_to_menu(void)
{
load_menu(true);
}
/* API }}} */
void vLifecycleTask(void *pvParameters)
{
lifecycle_task = xTaskGetCurrentTaskHandle();
mutex_create(&core1_mutex);
mutex_lock(&core1_mutex);
LOG_DEBUG("lifecycle", "Booting core 1 ...");
core1_boot();
vTaskDelay(pdMS_TO_TICKS(10));
mutex_unlock(&core1_mutex);
/*
* If `main.py` exists, start it. Otherwise, start `menu.py`.
*
* We are not using epic_exec() & return_to_menu() here because those
* trigger a reset which is undesirable during startup.
*/
mutex_lock(&core1_mutex);
int ret = load_sync("main.py", false);
mutex_unlock(&core1_mutex);
if (ret < 0) {
load_menu(false);
}
hardware_init();
execute_elfs = config_get_boolean_with_default("execute_elf", false);
/* When triggered, reset core 1 to menu */
while (1) {
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
mutex_lock(&core1_mutex);
if (write_menu) {
write_menu = false;
int ret = write_default_menu();
if (ret < 0) {
LOG_ERR("lifecycle",
"Failed to write default menu: %d",
ret);
load_async(PYINTERPRETER, true);
ulTaskNotifyTake(pdTRUE, 0);
}
}
ret = do_load((struct load_info *)&async_load);
mutex_unlock(&core1_mutex);
if (ret < 0) {
LOG_ERR("lifecycle", "Error loading payload: %d", ret);
return_to_menu();
}
}
}
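As an illustration of the calls defined above (a sketch under assumptions, not code from this changeset): Epicardium-side code can start a payload synchronously with epic_exec() and fall back to the menu with return_to_menu() if that fails. The app path below is made up; the error values are the ones load_stat()/do_load() return above, and LOG_WARN is assumed to come in via os/core.h as in lifecycle.c.

#include "epicardium.h"
#include "os/core.h"
#include "user_core/user_core.h"

static void example_launch_app(void)
{
	char name[] = "apps/example/__init__.py"; /* hypothetical path */

	int ret = epic_exec(name);
	if (ret < 0) {
		/* e.g. -ENOENT if the file does not exist, -ENOEXEC for an
		 * unknown file type, or -EPERM for .elf payloads while
		 * execute_elf is disabled. */
		LOG_WARN("example", "Failed to start %s: %d", name, ret);
		return_to_menu();
	}
}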
user_core_sources = files(
'dispatcher.c',
'interrupts.c',
'lifecycle.c',
'migration.c',
)
#include "epicardium.h"
#include "os/core.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
void migration_delete_app_launchers(void)
{
int fd = epic_file_opendir("/");
struct epic_stat entry;
for (;;) {
epic_file_readdir(fd, &entry);
if (entry.type == EPICSTAT_NONE) {
// End
break;
}
const char *dot = strrchr(entry.name, '.');
if (dot && !strcmp(dot, ".py")) {
const char launcher[] = "# Launcher script for ";
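/* Note: launcher_buf deliberately omits the terminating NUL of `launcher`,
 * so the read and memcmp below compare only the prefix. */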
char launcher_buf[strlen(launcher)];
int fd = epic_file_open(entry.name, "r");
if (fd >= 0) {
int n = epic_file_read(
fd, launcher_buf, sizeof(launcher_buf)
);
epic_file_close(fd);
if (n == (int)sizeof(launcher_buf) &&
!memcmp(launcher,
launcher_buf,
sizeof(launcher_buf))) {
LOG_INFO(
"migration",
"Delete old launcher %s",
entry.name
);
epic_file_unlink(entry.name);
}
}
}
}
epic_file_close(fd);
}
#pragma once
#include "FreeRTOS.h"
#include "os/mutex.h"
/* ---------- Dispatcher --------------------------------------------------- */
void vApiDispatcher(void *pvParameters);
void dispatcher_mutex_init(void);
extern struct mutex api_mutex;
extern TaskHandle_t dispatcher_task_id;
/* ---------- Lifecycle ---------------------------------------------------- */
void vLifecycleTask(void *pvParameters);
void return_to_menu(void);
/* ---------- Migration ---------------------------------------------------- */
void migration_delete_app_launchers(void);
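For orientation, a minimal sketch of how these entry points would typically be started (stack sizes, priorities and the helper name are assumptions, and the actual call site is not part of this changeset): vApiDispatcher, vInterruptsTask and vLifecycleTask are plain FreeRTOS task functions, assumed here to all be reachable through the user_core/user_core.h include used by the sources above, and would be created with xTaskCreate() during Epicardium start-up.

#include "FreeRTOS.h"
#include "task.h"
#include "user_core/user_core.h"

/* Hypothetical start-up helper; stack depths and priorities are
 * illustrative values only. */
static void start_user_core_tasks(void)
{
	xTaskCreate(vApiDispatcher, "API Dispatcher", 1024, NULL,
		    tskIDLE_PRIORITY + 2, NULL);
	xTaskCreate(vInterruptsTask, "Interrupts", 512, NULL,
		    tskIDLE_PRIORITY + 2, NULL);
	xTaskCreate(vLifecycleTask, "Lifecycle", 1024, NULL,
		    tskIDLE_PRIORITY + 1, NULL);
}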
epicardium/version-splash.png (15 KiB)

source init.gdb
set confirm off
mon max32xxx mass_erase 0
mon max32xxx mass_erase 1
echo #### BOOTLOADER ####\n
load build/bootloader/bootloader.elf
echo #### EPICARDIUM ####\n
load build/epicardium/epicardium.elf
echo #### PYCARDIUM ####\n
load build/pycardium/pycardium.elf
reset
quit
source init.gdb
set confirm off
echo #### BOOTLOADER ####\n
load build/bootloader/bootloader.elf
reset
quit
source init.gdb
set confirm off
echo #### EPICARDIUM ####\n
load build/epicardium/epicardium.elf
echo #### PYCARDIUM ####\n
load build/pycardium/pycardium.elf
reset
quit
source init.gdb
set confirm off
echo #### EPICARDIUM ####\n
load build/epicardium/epicardium.elf
reset
quit
source init.gdb
set confirm off
echo #### PYCARDIUM ####\n
load build/pycardium/pycardium.elf
reset
quit
@@ -6,7 +6,8 @@
 void *api_call_start(uint32_t id, void *args, uint32_t size)
 {
 	// aquire semaphore
-	while (E_BUSY == SEMA_GetSema (API_CALL_SEMA)) ;
+	while (E_BUSY == SEMA_GetSema(API_CALL_SEMA))
+		;
 	ApiCallSpace->id = id;
 	ApiCallSpace->returning = 0;
@@ -20,7 +21,8 @@ void* api_call_bother_dispatcher (void* buf)
 	while (1) {
 		// aquire semaphore
-		while (E_BUSY == SEMA_GetSema (API_CALL_SEMA)) ;
+		while (E_BUSY == SEMA_GetSema(API_CALL_SEMA))
+			;
 		if (ApiCallSpace->returning == 1) {
 			break;
 		}
......
@@ -17,7 +17,8 @@ void __api_dispatch_call(uint32_t id, void*buffer);
 void api_dispatcher()
 {
-	while (SEMA_GetSema(API_CALL_SEMA) == E_BUSY) {}
+	while (SEMA_GetSema(API_CALL_SEMA) == E_BUSY) {
+	}
 	if (ApiCallSpace->returning == 1) {
 		SEMA_FreeSema(API_CALL_SEMA);
......
@@ -7,7 +7,9 @@
 #include "api/api_dispatcher.h"
-static const gpio_cfg_t motor_pin = {PORT_0, PIN_8, GPIO_FUNC_OUT, GPIO_PAD_NONE};
+static const gpio_cfg_t motor_pin = {
+	PORT_0, PIN_8, GPIO_FUNC_OUT, GPIO_PAD_NONE
+};
 void api_set_buzzer(uint8_t state)
 {
@@ -23,7 +25,10 @@ void api_set_buzzer(uint8_t state)
 void api_set_led(uint8_t led, led_color_t color)
 {
 	printf("API: Changing color of led %d.\n", led);
-	printf("Color { r: %3d, g: %3d, b: %3d }\n", color.red, color.green, color.blue);
+	printf("Color { r: %3d, g: %3d, b: %3d }\n",
+	       color.red,
+	       color.green,
+	       color.blue);
 	leds_set(led, color.red, color.green, color.blue);
 	leds_update();
 }
@@ -31,7 +36,10 @@ void api_set_led(uint8_t led, led_color_t color)
 void api_test(char test0, short test1, int test2, long test3)
 {
 	printf("test0: %x, test1: %d, test2: %x, test3: %lx\n",
-	       test0, (int)test1, test2, test3);
+	       test0,
+	       (int)test1,
+	       test2,
+	       test3);
 }
 int main(void)
@@ -44,7 +52,6 @@ int main(void)
TMR_Delay(MXC_TMR1, MSEC(100), 0);
}
#if 0
// Enable rxev on core1
MXC_GCR->evten |= 0x20;
......
source ../../.gdbinit
source ../../init.gdb
set confirm off
......
@@ -50,18 +50,19 @@ def main():
f_client = cx.enter_context(open(args.client, "w"))
f_server = cx.enter_context(open(args.server, "w"))
print('#include "{}"\n'.format(
os.path.basename(args.header)
), file=f_client)
print('#include "{}"\n'.format(os.path.basename(args.header)), file=f_client)
print("""\
print(
"""\
#include "{}"
void __api_dispatch_call(uint32_t id, void*buffer)
{{
switch (id) {{""".format(
os.path.basename(args.header)
), file=f_server)
),
file=f_server,
)
for match in matcher.finditer(source):
api_id = match.group("id")
@@ -100,9 +101,12 @@ void {cdecl}({cargs})
file=f_client,
)
print("""\
print(
"""\
case {id}:
{cdecl}(""".format(id=api_id, cdecl=api_decl),
{cdecl}(""".format(
id=api_id, cdecl=api_decl
),
file=f_server,
)
@@ -122,18 +126,17 @@ void {cdecl}({cargs})
print(
"""\
*({type}*)(buffer + {offset})""".format(
type=ty,
offset=" + ".join(api_args_sizes[:i]) if i > 0 else "0",
type=ty, offset=" + ".join(api_args_sizes[:i]) if i > 0 else "0"
),
file=f_server,
end="",
)
print("""
print(
"""
);
break;""".format(
cdecl=api_decl,
args=", ".join(api_args_names),
cdecl=api_decl, args=", ".join(api_args_names)
),
file=f_server,
)
@@ -154,14 +157,17 @@ void {cdecl}({cargs})
file=f_client,
)
print("""\
print(
"""\
default:
printf("Error: API function %x is unknown!!\\n", {id});
break;
}}
}}""".format(
id=api_id,
), file=f_server)
id=api_id
),
file=f_server,
)
if __name__ == "__main__":
......
file ../../build/hw-tests/bmatest/bmatest.elf
source ../../.gdbinit
source ../../init.gdb
file ../../build/hw-tests/bmatest/bmatest.elf