implement atomic opcode in AOT/JIT (#329)

This commit is contained in:
Xu Jun 2020-08-03 11:30:26 +08:00 committed by GitHub
parent cc05f8fb1c
commit 29e45e1527
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 1447 additions and 158 deletions

View File

@ -47,7 +47,7 @@ BraceWrapping:
AfterCaseLabel: false AfterCaseLabel: false
AfterClass: true AfterClass: true
AfterControlStatement: false AfterControlStatement: false
AfterEnum: true AfterEnum: false
AfterFunction: true AfterFunction: true
AfterNamespace: false AfterNamespace: false
AfterObjCDeclaration: false AfterObjCDeclaration: false

5
.gitignore vendored
View File

@ -1,9 +1,6 @@
.vscode .vscode
**/*build/ **/*build/
core/deps/lv_drivers core/deps/**
core/deps/llvm
core/deps/lvgl
core/deps/tlsf
core/shared/mem-alloc/tlsf core/shared/mem-alloc/tlsf
core/app-framework/wgl core/app-framework/wgl

View File

@ -13,6 +13,22 @@ typedef struct {
#define REG_SYM(symbol) { #symbol, (void*)symbol } #define REG_SYM(symbol) { #symbol, (void*)symbol }
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
#define REG_BULK_MEMORY_SYM() \
REG_SYM(aot_memory_init), \
REG_SYM(aot_data_drop),
#else
#define REG_BULK_MEMORY_SYM()
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#define REG_ATOMIC_WAIT_SYM() \
REG_SYM(wasm_runtime_atomic_wait), \
REG_SYM(wasm_runtime_atomic_notify),
#else
#define REG_ATOMIC_WAIT_SYM()
#endif
#define REG_COMMON_SYMBOLS \ #define REG_COMMON_SYMBOLS \
REG_SYM(aot_set_exception_with_id), \ REG_SYM(aot_set_exception_with_id), \
REG_SYM(aot_invoke_native), \ REG_SYM(aot_invoke_native), \
@ -31,30 +47,8 @@ typedef struct {
REG_SYM(truncf), \ REG_SYM(truncf), \
REG_SYM(rint), \ REG_SYM(rint), \
REG_SYM(rintf), \ REG_SYM(rintf), \
REG_SYM(memset), \ REG_BULK_MEMORY_SYM() \
REG_SYM(memmove), \ REG_ATOMIC_WAIT_SYM()
REG_SYM(aot_memory_init), \
REG_SYM(aot_data_drop)
#else
#define REG_COMMON_SYMBOLS \
REG_SYM(aot_set_exception_with_id), \
REG_SYM(aot_invoke_native), \
REG_SYM(aot_call_indirect), \
REG_SYM(wasm_runtime_enlarge_memory), \
REG_SYM(wasm_runtime_set_exception), \
REG_SYM(fmin), \
REG_SYM(fminf), \
REG_SYM(fmax), \
REG_SYM(fmaxf), \
REG_SYM(ceil), \
REG_SYM(ceilf), \
REG_SYM(floor), \
REG_SYM(floorf), \
REG_SYM(trunc), \
REG_SYM(truncf), \
REG_SYM(rint), \
REG_SYM(rintf)
#endif
#define CHECK_RELOC_OFFSET(data_size) do { \ #define CHECK_RELOC_OFFSET(data_size) do { \
if (!check_reloc_offset(target_section_size, reloc_offset, data_size, \ if (!check_reloc_offset(target_section_size, reloc_offset, data_size, \

View File

@ -1124,6 +1124,9 @@ aot_set_exception_with_id(AOTModuleInstance *module_inst,
case EXCE_NATIVE_STACK_OVERFLOW: case EXCE_NATIVE_STACK_OVERFLOW:
aot_set_exception(module_inst, "native stack overflow"); aot_set_exception(module_inst, "native stack overflow");
break; break;
case EXCE_UNALIGNED_ATOMIC:
aot_set_exception(module_inst, "unaligned atomic");
break;
default: default:
break; break;
} }

View File

@ -31,6 +31,7 @@ typedef enum AOTExceptionID {
EXCE_UNINITIALIZED_ELEMENT, EXCE_UNINITIALIZED_ELEMENT,
EXCE_CALL_UNLINKED_IMPORT_FUNC, EXCE_CALL_UNLINKED_IMPORT_FUNC,
EXCE_NATIVE_STACK_OVERFLOW, EXCE_NATIVE_STACK_OVERFLOW,
EXCE_UNALIGNED_ATOMIC,
EXCE_NUM, EXCE_NUM,
} AOTExceptionID; } AOTExceptionID;

View File

@ -56,7 +56,7 @@ void __aeabi_f2iz();
void __aeabi_f2d(); void __aeabi_f2d();
static SymbolMap target_sym_map[] = { static SymbolMap target_sym_map[] = {
REG_COMMON_SYMBOLS, REG_COMMON_SYMBOLS
/* compiler-rt symbols that come from compiler(e.g. gcc) */ /* compiler-rt symbols that come from compiler(e.g. gcc) */
REG_SYM(__divdi3), REG_SYM(__divdi3),
REG_SYM(__udivdi3), REG_SYM(__udivdi3),

View File

@ -55,7 +55,7 @@ void __aeabi_f2iz();
void __aeabi_f2d(); void __aeabi_f2d();
static SymbolMap target_sym_map[] = { static SymbolMap target_sym_map[] = {
REG_COMMON_SYMBOLS, REG_COMMON_SYMBOLS
/* compiler-rt symbols that come from compiler(e.g. gcc) */ /* compiler-rt symbols that come from compiler(e.g. gcc) */
REG_SYM(__divdi3), REG_SYM(__divdi3),
REG_SYM(__udivdi3), REG_SYM(__udivdi3),

View File

@ -14,7 +14,7 @@ void __moddi3();
void __umoddi3(); void __umoddi3();
static SymbolMap target_sym_map[] = { static SymbolMap target_sym_map[] = {
REG_COMMON_SYMBOLS, REG_COMMON_SYMBOLS
/* compiler-rt symbols that come from compiler(e.g. gcc) */ /* compiler-rt symbols that come from compiler(e.g. gcc) */
REG_SYM(__divdi3), REG_SYM(__divdi3),
REG_SYM(__udivdi3), REG_SYM(__udivdi3),

View File

@ -22,7 +22,7 @@ void __modsi3();
void __divdi3(); void __divdi3();
static SymbolMap target_sym_map[] = { static SymbolMap target_sym_map[] = {
REG_COMMON_SYMBOLS, REG_COMMON_SYMBOLS
/* API's for soft-float */ /* API's for soft-float */
/* TODO: only register these symbols when Floating-Point Coprocessor /* TODO: only register these symbols when Floating-Point Coprocessor

View File

@ -7,7 +7,12 @@
#include "wasm_runtime_common.h" #include "wasm_runtime_common.h"
#include "bh_log.h" #include "bh_log.h"
#if !defined(BH_PLATFORM_ZEPHYR) && !defined(BH_PLATFORM_ALIOS_THINGS)
#define ENABLE_QUICKSORT 1 #define ENABLE_QUICKSORT 1
#else
#define ENABLE_QUICKSORT 0
#endif
#define ENABLE_SORT_DEBUG 0 #define ENABLE_SORT_DEBUG 0
#if ENABLE_SORT_DEBUG != 0 #if ENABLE_SORT_DEBUG != 0

View File

@ -10,11 +10,50 @@ static bh_list shared_memory_list_head;
static bh_list *const shared_memory_list = &shared_memory_list_head; static bh_list *const shared_memory_list = &shared_memory_list_head;
static korp_mutex shared_memory_list_lock; static korp_mutex shared_memory_list_lock;
/* Status of an AtomicWaitNode: a waiter starts in S_WAITING and is moved
   to S_NOTIFIED by wasm_runtime_atomic_notify; a node still in S_WAITING
   after the condition wait returns indicates a timeout. */
enum {
    S_WAITING, S_NOTIFIED
};

/* Per-address bookkeeping for atomic wait/notify: the list of threads
   currently waiting on one linear-memory address, plus the lock that
   guards that list. */
typedef struct AtomicWaitInfo {
    korp_mutex wait_list_lock;  /* protects wait_list */
    bh_list wait_list_head;
    bh_list *wait_list;         /* always points at wait_list_head */
} AtomicWaitInfo;

/* One blocked waiter: linked into an AtomicWaitInfo's wait_list and
   blocked on its own condition variable. */
typedef struct AtomicWaitNode {
    bh_list_link l;
    uint8 status;              /* S_WAITING or S_NOTIFIED */
    korp_mutex wait_lock;      /* paired with wait_cond for the blocking wait */
    korp_cond wait_cond;
} AtomicWaitNode;

/* Atomic wait map: wait address -> AtomicWaitInfo, created in
   wasm_shared_memory_init */
static HashMap *wait_map;

static uint32
wait_address_hash(void *address);

static bool
wait_address_equal(void *h1, void *h2);

static void
destroy_wait_info(void *wait_info);
bool bool
wasm_shared_memory_init() wasm_shared_memory_init()
{ {
if (os_mutex_init(&shared_memory_list_lock) != 0) if (os_mutex_init(&shared_memory_list_lock) != 0)
return false; return false;
/* wait map not exists, create new map */
if (!(wait_map =
bh_hash_map_create(32, true,
(HashFunc)wait_address_hash,
(KeyEqualFunc)wait_address_equal,
NULL, destroy_wait_info))) {
os_mutex_destroy(&shared_memory_list_lock);
return false;
}
return true; return true;
} }
@ -22,6 +61,9 @@ void
wasm_shared_memory_destroy() wasm_shared_memory_destroy()
{ {
os_mutex_destroy(&shared_memory_list_lock); os_mutex_destroy(&shared_memory_list_lock);
if (wait_map) {
bh_hash_map_destroy(wait_map);
}
} }
static WASMSharedMemNode* static WASMSharedMemNode*
@ -118,3 +160,260 @@ shared_memory_set_memory_inst(WASMModuleCommon *module,
(void)ret; (void)ret;
return node; return node;
} }
/* Atomics wait && notify APIs */
static uint32
wait_address_hash(void *address)
{
return (uint32)(uintptr_t)address;
}
/* Two wait addresses are equal iff they are the same pointer. */
static bool
wait_address_equal(void *h1, void *h2)
{
    return h1 == h2;
}
/* Return true if `node` is still linked on `wait_list`.
   Caller must hold the wait list lock. */
static bool
is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
{
    AtomicWaitNode *cursor;

    for (cursor = bh_list_first_elem(wait_list);
         cursor != NULL;
         cursor = bh_list_elem_next(cursor)) {
        if (cursor == node)
            return true;
    }

    return false;
}
/* Wake up to `count` waiters on `wait_list` and return the number
 * actually notified.  `count == UINT32_MAX` means "notify all".
 * Caller must hold the wait list lock. */
static uint32
notify_wait_list(bh_list *wait_list, uint32 count)
{
    AtomicWaitNode *node, *next;
    uint32 i, notify_count = count;

    /* Clamp the request to the number of waiters actually present */
    if ((count == UINT32_MAX) || (count > wait_list->len))
        notify_count = wait_list->len;

    node = bh_list_first_elem(wait_list);
    /* Iterate notify_count times, not count times: with count larger
     * than the list length (e.g. UINT32_MAX), looping on `count` would
     * walk past the end of the list and dereference a NULL node. */
    for (i = 0; i < notify_count; i++) {
        bh_assert(node);
        /* Save the successor before waking: the woken thread removes
         * its own node from the list. */
        next = bh_list_elem_next(node);

        node->status = S_NOTIFIED;
        /* wakeup the waiter blocked on its condition variable */
        os_cond_signal(&node->wait_cond);

        node = next;
    }

    return notify_count;
}
/* Look up the AtomicWaitInfo registered for `address` in the global
 * wait map.  When `create` is false, return the existing entry or NULL.
 * When `create` is true and no entry exists, allocate, initialize and
 * register a new one; returns NULL on allocation/init failure.
 * NOTE(review): the find followed by insert is not covered by a single
 * map-wide critical section here; two threads could race to create an
 * entry for the same address — confirm callers serialize this or that
 * bh_hash_map's internal lock (created with use_lock == true in
 * wasm_shared_memory_init) is sufficient for the intended usage. */
static AtomicWaitInfo *
acquire_wait_info(void *address, bool create)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    wait_info = (AtomicWaitInfo *)
        bh_hash_map_find(wait_map, address);

    if (!create)
        return wait_info;

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info =
            (AtomicWaitInfo *)wasm_runtime_malloc(sizeof(AtomicWaitInfo))))
            return NULL;
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);

        /* init wait list lock */
        if (0 != os_mutex_init(&wait_info->wait_list_lock)) {
            wasm_runtime_free(wait_info);
            return NULL;
        }

        /* Register the new info; on failure unwind the mutex and
           allocation so nothing leaks. */
        if (!bh_hash_map_insert(wait_map, address,
                                (void *)wait_info)) {
            os_mutex_destroy(&wait_info->wait_list_lock);
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    bh_assert(wait_info);
    /* ret is only consumed by bh_assert; silence unused warnings in
       release builds */
    (void)ret;
    return wait_info;
}
static void
destroy_wait_info(void *wait_info)
{
AtomicWaitNode *node, *next;
if (wait_info) {
node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);
while (node) {
next = bh_list_elem_next(node);
os_mutex_destroy(&node->wait_lock);
os_cond_destroy(&node->wait_cond);
wasm_runtime_free(node);
node = next;
}
os_mutex_destroy(&((AtomicWaitInfo *)wait_info)->wait_list_lock);
wasm_runtime_free(wait_info);
}
}
/* Remove and destroy the wait info for `address` once its wait list has
 * drained to empty; no-op while waiters remain.
 * NOTE(review): wait_list->len is read without holding wait_list_lock;
 * confirm the callers' locking discipline makes it impossible for a new
 * waiter to register between this check and the destruction. */
static void
release_wait_info(HashMap *wait_map,
                  AtomicWaitInfo *wait_info, void *address)
{
    if (wait_info->wait_list->len == 0) {
        bh_hash_map_remove(wait_map, address, NULL, NULL);
        destroy_wait_info(wait_info);
    }
}
/* Runtime implementation of the wasm atomic wait opcodes
 * (i32.atomic.wait / i64.atomic.wait).
 *
 * Blocks the calling thread until the value at `address` is notified,
 * unless the current value does not equal `expect`.  `wait64` selects a
 * 64-bit comparison; `timeout` < 0 means wait forever (its unit is
 * whatever os_cond_reltimedwait expects — TODO confirm it matches the
 * wasm spec's nanosecond timeout argument at the call site).
 *
 * Returns 0 when woken by notify, 1 when *address != expect, 2 on
 * timeout, and -1 on error with an exception set on the module
 * instance.
 * NOTE(review): the declared return type is uint32 but error paths
 * return -1; callers must compare against (uint32)-1. */
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    bool check_ret, is_timeout;

#if WASM_ENABLE_INTERP != 0
    /* The spec only allows waiting on shared memory */
    if (module->module_type == Wasm_Module_Bytecode) {
        WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
        /* Currently we have only one memory instance */
        if (!module_inst->memories[0]->is_shared) {
            wasm_runtime_set_exception(module, "wait on unshared memory");
            return -1;
        }
    }
#endif
#if WASM_ENABLE_AOT != 0
    if (module->module_type == Wasm_Module_AoT) {
        AOTModuleInstance *aot_inst = (AOTModuleInstance *)module;
        AOTMemoryInstance *aot_memory =
            ((AOTMemoryInstance **)aot_inst->memories.ptr)[0];
        /* Currently we have only one memory instance */
        if (!aot_memory->is_shared) {
            wasm_runtime_set_exception(module, "wait on unshared memory");
            return -1;
        }
    }
#endif

    /* acquire the wait info, create new one if not exists */
    wait_info = acquire_wait_info(address, true);

    if (!wait_info) {
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    os_mutex_lock(&wait_info->wait_list_lock);

    /* Compare under the list lock so a concurrent notify cannot slip
       between the comparison and the enqueue below */
    if ((!wait64 && *(uint32*)address != (uint32)expect)
        || (wait64 && *(uint64*)address != expect)) {
        /* values mismatch: return "not-equal" without waiting */
        os_mutex_unlock(&wait_info->wait_list_lock);
        return 1;
    }
    else {
        bh_list_status ret;

        if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
            wasm_runtime_set_exception(module, "failed to create wait node");
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }
        memset(wait_node, 0, sizeof(AtomicWaitNode));

        if (0 != os_mutex_init(&wait_node->wait_lock)) {
            wasm_runtime_free(wait_node);
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }

        if (0 != os_cond_init(&wait_node->wait_cond)) {
            os_mutex_destroy(&wait_node->wait_lock);
            wasm_runtime_free(wait_node);
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }

        wait_node->status = S_WAITING;

        ret = bh_list_insert(wait_info->wait_list, wait_node);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;
    }

    os_mutex_unlock(&wait_info->wait_list_lock);

    /* condition wait start */
    os_mutex_lock(&wait_node->wait_lock);

    if (timeout < 0)
        timeout = BHT_WAIT_FOREVER;
    os_cond_reltimedwait(&wait_node->wait_cond,
                         &wait_node->wait_lock, timeout);

    os_mutex_unlock(&wait_node->wait_lock);

    /* Check the wait node status: status is written by notify under
       wait_list_lock, so re-take that lock before reading it */
    os_mutex_lock(&wait_info->wait_list_lock);
    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);

    /* Still S_WAITING after the wait returned means nobody notified us,
       i.e. the wait timed out */
    is_timeout = wait_node->status == S_WAITING ? true : false;

    bh_list_remove(wait_info->wait_list, wait_node);
    os_mutex_destroy(&wait_node->wait_lock);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);
    os_mutex_unlock(&wait_info->wait_list_lock);

    /* Drop the per-address info if we were the last waiter */
    release_wait_info(wait_map, wait_info, address);

    (void)check_ret;
    return is_timeout ? 2 : 0;
}
/* Runtime implementation of the wasm atomic.notify opcode: wake up to
 * `count` threads currently waiting on `address` and return the number
 * actually woken (0 if nobody is waiting).
 * NOTE(review): the uint8 return type truncates notify_result, which
 * can be as large as the wait-list length — confirm callers never
 * expect a count above 255, or widen the return type. */
uint8
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module,
                           void *address, uint32 count)
{
    uint32 notify_result;
    AtomicWaitInfo *wait_info;

    /* Nobody wait on this address */
    wait_info = acquire_wait_info(address, false);
    if (!wait_info)
        return 0;

    os_mutex_lock(&wait_info->wait_list_lock);
    notify_result = notify_wait_list(wait_info->wait_list, count);
    os_mutex_unlock(&wait_info->wait_list_lock);

    /* Drop the per-address info if the notify drained the list */
    release_wait_info(wait_map, wait_info, address);

    return notify_result;
}

View File

@ -53,6 +53,13 @@ WASMSharedMemNode*
shared_memory_set_memory_inst(WASMModuleCommon *module, shared_memory_set_memory_inst(WASMModuleCommon *module,
WASMMemoryInstanceCommon *memory); WASMMemoryInstanceCommon *memory);
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
uint64 expect, int64 timeout, bool wait64);
uint8
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module,
void *address, uint32 count);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@ -86,6 +86,38 @@ read_leb(const uint8 *buf, const uint8 *buf_end,
res = (int64)res64; \ res = (int64)res64; \
} while (0) } while (0)
/* Expand the `case` labels for one atomic read-modify-write operator
 * (Add/Sub/And/Or/Xor/Xchg) across all of its wasm width/type variants.
 * Each case records the access width (`bytes`) and operand value type
 * (`op_type`), then jumps to a per-operator tail label that sets the
 * LLVM atomicrmw bin-op and falls through to the shared
 * `build_atomic_rmw` label.  Must be expanded inside the atomic-prefix
 * opcode switch of aot_compile_func, which declares `bytes`, `op_type`
 * and `bin_op` and defines `build_atomic_rmw`. */
#define COMPILE_ATOMIC_RMW(OP, NAME) \
    case WASM_OP_ATOMIC_RMW_I32_##NAME: \
        bytes = 4; \
        op_type = VALUE_TYPE_I32; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I64_##NAME: \
        bytes = 8; \
        op_type = VALUE_TYPE_I64; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I32_##NAME##8_U: \
        bytes = 1; \
        op_type = VALUE_TYPE_I32; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I32_##NAME##16_U: \
        bytes = 2; \
        op_type = VALUE_TYPE_I32; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I64_##NAME##8_U: \
        bytes = 1; \
        op_type = VALUE_TYPE_I64; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I64_##NAME##16_U: \
        bytes = 2; \
        op_type = VALUE_TYPE_I64; \
        goto OP_ATOMIC_##OP; \
    case WASM_OP_ATOMIC_RMW_I64_##NAME##32_U: \
        bytes = 4; \
        op_type = VALUE_TYPE_I64; \
    OP_ATOMIC_##OP: \
        bin_op = LLVMAtomicRMWBinOp##OP; \
        goto build_atomic_rmw;
static bool static bool
aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
{ {
@ -286,7 +318,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset,
bytes, sign)) bytes, sign, false))
return false; return false;
break; break;
@ -312,7 +344,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset,
bytes, sign)) bytes, sign, false))
return false; return false;
break; break;
@ -341,7 +373,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
op_i32_store: op_i32_store:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset, bytes)) if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align,
offset, bytes, false))
return false; return false;
break; break;
@ -359,7 +392,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
op_i64_store: op_i64_store:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset, bytes)) if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align,
offset, bytes, false))
return false; return false;
break; break;
@ -810,7 +844,152 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
default: default:
break; break;
} }
break;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
uint8 bin_op, op_type;
if (frame_ip < frame_ip_end) {
opcode = *frame_ip++;
}
if (opcode != WASM_OP_ATOMIC_FENCE) {
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
}
switch (opcode) {
case WASM_OP_ATOMIC_WAIT32:
if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx, VALUE_TYPE_I32,
align, offset, 4))
return false;
break;
case WASM_OP_ATOMIC_WAIT64:
if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx, VALUE_TYPE_I64,
align, offset, 8))
return false;
break;
case WASM_OP_ATOMIC_NOTIFY:
if (!aot_compiler_op_atomic_notify(comp_ctx, func_ctx, align,
offset, bytes))
return false;
break;
case WASM_OP_ATOMIC_I32_LOAD:
bytes = 4;
goto op_atomic_i32_load;
case WASM_OP_ATOMIC_I32_LOAD8_U:
bytes = 1;
goto op_atomic_i32_load;
case WASM_OP_ATOMIC_I32_LOAD16_U:
bytes = 2;
op_atomic_i32_load:
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align,
offset, bytes, sign, true))
return false;
break;
case WASM_OP_ATOMIC_I64_LOAD:
bytes = 8;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD8_U:
bytes = 1;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD16_U:
bytes = 2;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD32_U:
bytes = 4;
op_atomic_i64_load:
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align,
offset, bytes, sign, true))
return false;
break;
case WASM_OP_ATOMIC_I32_STORE:
bytes = 4;
goto op_atomic_i32_store;
case WASM_OP_ATOMIC_I32_STORE8:
bytes = 1;
goto op_atomic_i32_store;
case WASM_OP_ATOMIC_I32_STORE16:
bytes = 2;
op_atomic_i32_store:
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align,
offset, bytes, true))
return false;
break;
case WASM_OP_ATOMIC_I64_STORE:
bytes = 8;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE8:
bytes = 1;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE16:
bytes = 2;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE32:
bytes = 4;
op_atomic_i64_store:
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align,
offset, bytes, true))
return false;
break;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG:
bytes = 4;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG:
bytes = 8;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U:
bytes = 1;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U:
bytes = 2;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U:
bytes = 1;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U:
bytes = 2;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U:
bytes = 4;
op_type = VALUE_TYPE_I64;
op_atomic_cmpxchg:
if (!aot_compile_op_atomic_cmpxchg(comp_ctx, func_ctx,
op_type, align,
offset, bytes))
return false;
break;
COMPILE_ATOMIC_RMW(Add, ADD);
COMPILE_ATOMIC_RMW(Sub, SUB);
COMPILE_ATOMIC_RMW(And, AND);
COMPILE_ATOMIC_RMW(Or, OR);
COMPILE_ATOMIC_RMW(Xor, XOR);
COMPILE_ATOMIC_RMW(Xchg, XCHG);
build_atomic_rmw:
if (!aot_compile_op_atomic_rmw(comp_ctx, func_ctx,
bin_op, op_type,
align, offset, bytes))
return false;
break;
default:
break;
}
break;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
default: default:
break; break;

View File

@ -18,7 +18,8 @@ static char *exce_block_names[] = {
"exce_undefined_element", /* EXCE_UNDEFINED_ELEMENT */ "exce_undefined_element", /* EXCE_UNDEFINED_ELEMENT */
"exce_uninit_element", /* EXCE_UNINITIALIZED_ELEMENT */ "exce_uninit_element", /* EXCE_UNINITIALIZED_ELEMENT */
"exce_call_unlinked", /* EXCE_CALL_UNLINKED_IMPORT_FUNC */ "exce_call_unlinked", /* EXCE_CALL_UNLINKED_IMPORT_FUNC */
"exce_native_stack_overflow" /* EXCE_NATIVE_STACK_OVERFLOW */ "exce_native_stack_overflow", /* EXCE_NATIVE_STACK_OVERFLOW */
"exce_unaligned_atomic" /* EXCE_UNALIGNED_ATOMIC */
}; };
bool bool

View File

@ -205,7 +205,7 @@ fail:
LLVMSetAlignment(value, 1); \ LLVMSetAlignment(value, 1); \
} while (0) } while (0)
#define BUILD_TRUNC(data_type) do { \ #define BUILD_TRUNC(value, data_type) do { \
if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, \ if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, \
data_type, "val_trunc"))){ \ data_type, "val_trunc"))){ \
aot_set_last_error("llvm build trunc failed."); \ aot_set_last_error("llvm build trunc failed."); \
@ -238,9 +238,79 @@ fail:
} \ } \
} while (0) } while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Emit LLVM IR that raises EXCE_UNALIGNED_ATOMIC when `addr` is not
 * aligned to 2^align bytes (`align` is the log2 alignment field from
 * the wasm memarg).  On success the builder is left positioned at the
 * newly created check_align_succ block, so subsequent code only runs on
 * the aligned path.  Returns false on IR-emission failure. */
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int.  NOTE(review): ptrtoint to I32 truncates
       the address on 64-bit targets; the low bits are preserved, so the
       alignment test below is presumably still correct — confirm. */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr,
                                   I32_TYPE, "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned: (addr & (2^align - 1)) != 0
       selects the trap branch */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    /* Branch to the exception block when misaligned, else continue at
       check_align_succ */
    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_UNALIGNED_ATOMIC,
                            true, res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);

    return true;
fail:
    return false;
}
/* Emit a sequentially-consistent atomic load from `maddr` into `value`,
 * preceded by a runtime alignment check on the address.  `align` is the
 * log2 alignment from the wasm memarg.  Expects `comp_ctx`, `func_ctx`,
 * `maddr` and `value` in scope and a `fail` label to jump to on error. */
#define BUILD_ATOMIC_LOAD(align) do { \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
        goto fail; \
    } \
    if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr, \
                                "data"))) { \
        aot_set_last_error("llvm build load failed."); \
        goto fail; \
    } \
    LLVMSetAlignment(value, 1 << align); \
    LLVMSetVolatile(value, true); \
    LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent); \
} while (0)

/* Emit a sequentially-consistent atomic store of `value` to `maddr`,
 * preceded by a runtime alignment check on the address.  Same scope
 * requirements as BUILD_ATOMIC_LOAD. */
#define BUILD_ATOMIC_STORE(align) do { \
    LLVMValueRef res; \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
        goto fail; \
    } \
    if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
        aot_set_last_error("llvm build store failed."); \
        goto fail; \
    } \
    LLVMSetAlignment(res, 1 << align); \
    LLVMSetVolatile(res, true); \
    LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent); \
} while (0)
#endif
bool bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign) uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic)
{ {
LLVMValueRef maddr, value = NULL; LLVMValueRef maddr, value = NULL;
@ -250,7 +320,12 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
switch (bytes) { switch (bytes) {
case 4: case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE); BUILD_PTR_CAST(INT32_PTR_TYPE);
BUILD_LOAD(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_LOAD(align);
else
#endif
BUILD_LOAD();
break; break;
case 2: case 2:
case 1: case 1:
@ -258,11 +333,20 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
BUILD_PTR_CAST(INT16_PTR_TYPE); BUILD_PTR_CAST(INT16_PTR_TYPE);
else else
BUILD_PTR_CAST(INT8_PTR_TYPE); BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_LOAD(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (sign) if (atomic) {
BUILD_SIGN_EXT(I32_TYPE); BUILD_ATOMIC_LOAD(align);
else
BUILD_ZERO_EXT(I32_TYPE); BUILD_ZERO_EXT(I32_TYPE);
}
else
#endif
{
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I32_TYPE);
else
BUILD_ZERO_EXT(I32_TYPE);
}
break; break;
default: default:
bh_assert(0); bh_assert(0);
@ -277,7 +361,8 @@ fail:
bool bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign) uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic)
{ {
LLVMValueRef maddr, value = NULL; LLVMValueRef maddr, value = NULL;
@ -287,7 +372,12 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
switch (bytes) { switch (bytes) {
case 8: case 8:
BUILD_PTR_CAST(INT64_PTR_TYPE); BUILD_PTR_CAST(INT64_PTR_TYPE);
BUILD_LOAD(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_LOAD(align);
else
#endif
BUILD_LOAD();
break; break;
case 4: case 4:
case 2: case 2:
@ -298,11 +388,20 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
BUILD_PTR_CAST(INT16_PTR_TYPE); BUILD_PTR_CAST(INT16_PTR_TYPE);
else else
BUILD_PTR_CAST(INT8_PTR_TYPE); BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_LOAD(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (sign) if (atomic) {
BUILD_SIGN_EXT(I64_TYPE); BUILD_ATOMIC_LOAD(align);
else
BUILD_ZERO_EXT(I64_TYPE); BUILD_ZERO_EXT(I64_TYPE);
}
else
#endif
{
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I64_TYPE);
else
BUILD_ZERO_EXT(I64_TYPE);
}
break; break;
default: default:
bh_assert(0); bh_assert(0);
@ -351,7 +450,7 @@ fail:
bool bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes) uint32 align, uint32 offset, uint32 bytes, bool atomic)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
@ -366,18 +465,23 @@ aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
break; break;
case 2: case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE); BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(INT16_TYPE); BUILD_TRUNC(value, INT16_TYPE);
break; break;
case 1: case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE); BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(INT8_TYPE); BUILD_TRUNC(value, INT8_TYPE);
break; break;
default: default:
bh_assert(0); bh_assert(0);
break; break;
} }
BUILD_STORE(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_STORE(align);
else
#endif
BUILD_STORE();
return true; return true;
fail: fail:
return false; return false;
@ -385,7 +489,7 @@ fail:
bool bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes) uint32 align, uint32 offset, uint32 bytes, bool atomic)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
@ -400,22 +504,27 @@ aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
break; break;
case 4: case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE); BUILD_PTR_CAST(INT32_PTR_TYPE);
BUILD_TRUNC(I32_TYPE); BUILD_TRUNC(value, I32_TYPE);
break; break;
case 2: case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE); BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(INT16_TYPE); BUILD_TRUNC(value, INT16_TYPE);
break; break;
case 1: case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE); BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(INT8_TYPE); BUILD_TRUNC(value, INT8_TYPE);
break; break;
default: default:
bh_assert(0); bh_assert(0);
break; break;
} }
BUILD_STORE(); #if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_STORE(align);
else
#endif
BUILD_STORE();
return true; return true;
fail: fail:
return false; return false;
@ -603,6 +712,36 @@ fail:
return false; return false;
} }
/* Resolve runtime helper `name` as an LLVM callee value `func` with the
 * given `argc`-argument `func_type` (built from `param_types` and
 * `ret_type`, which must be in scope).  In JIT mode the helper's real
 * address is baked in as a constant function pointer; in AOT mode a
 * declaration is added to the module so the symbol is bound at load
 * time via the runtime symbol map. */
#define GET_AOT_FUNCTION(name, argc) do { \
    if (!(func_type = LLVMFunctionType(ret_type, param_types, \
                                       argc, false))) { \
        aot_set_last_error("llvm add function type failed."); \
        return false; \
    } \
    if (comp_ctx->is_jit_mode) { \
        /* JIT mode, call the function directly */ \
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
            aot_set_last_error("llvm add pointer type failed."); \
            return false; \
        } \
        if (!(value = I64_CONST((uint64)(uintptr_t)name)) \
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) { \
            aot_set_last_error("create LLVM value failed."); \
            return false; \
        } \
    } \
    else { \
        char *func_name = #name; \
        /* AOT mode, declare the function */ \
        if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name)) \
            && !(func = LLVMAddFunction(comp_ctx->module, \
                                        func_name, func_type))) { \
            aot_set_last_error("llvm add function failed."); \
            return false; \
        } \
    } \
} while (0)
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
static LLVMValueRef static LLVMValueRef
@ -691,36 +830,6 @@ fail:
return NULL; return NULL;
} }
#define GET_AOT_FUNCTION(name, argc) do { \
if (!(func_type = LLVMFunctionType(ret_type, param_types, \
argc, false))) { \
aot_set_last_error("llvm add function type failed."); \
return false; \
} \
if (comp_ctx->is_jit_mode) { \
/* JIT mode, call the function directly */ \
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
aot_set_last_error("llvm add pointer type failed."); \
return false; \
} \
if (!(value = I64_CONST((uint64)(uintptr_t)name)) \
|| !(func = LLVMConstIntToPtr(value, func_ptr_type))) { \
aot_set_last_error("create LLVM value failed."); \
return false; \
} \
} \
else { \
char *func_name = #name; \
/* AOT mode, delcare the function */ \
if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name)) \
&& !(func = LLVMAddFunction(comp_ctx->module, \
func_name, func_type))) { \
aot_set_last_error("llvm add function failed."); \
return false; \
} \
} \
} while (0)
bool bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 seg_index) uint32 seg_index)
@ -810,6 +919,7 @@ aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type; LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
seg = I32_CONST(seg_index); seg = I32_CONST(seg_index);
CHECK_LLVM_CONST(seg);
param_types[0] = INT8_PTR_TYPE; param_types[0] = INT8_PTR_TYPE;
param_types[1] = I32_TYPE; param_types[1] = I32_TYPE;
@ -825,7 +935,10 @@ aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
aot_set_last_error("llvm build call failed."); aot_set_last_error("llvm build call failed.");
return false; return false;
} }
return true; return true;
fail:
return false;
} }
bool bool
@ -879,4 +992,308 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
fail: fail:
return false; return false;
} }
#endif /* WASM_ENABLE_BULK_MEMORY */ #endif /* end of WASM_ENABLE_BULK_MEMORY */
#if WASM_ENABLE_SHARED_MEMORY != 0
/**
 * Compile a wasm atomic read-modify-write opcode (atomic.rmw add/sub/and/
 * or/xor/xchg, any access width).
 *
 * Pops the operand value (i32 or i64 per @op_type), bounds- and
 * alignment-checks the effective address, emits an LLVM `atomicrmw`
 * with sequentially-consistent ordering, and pushes the value previously
 * held in memory, zero-extended back to the operand type (sub-word
 * accesses are unsigned in the wasm threads proposal).
 *
 * @param atomic_op the LLVMAtomicRMWBinOp to emit
 * @param op_type   VALUE_TYPE_I32 or VALUE_TYPE_I64 (operand/result type)
 * @param align     log2 alignment from the memarg (checked by the caller
 *                  against the natural alignment; re-checked at runtime)
 * @param offset    static memarg offset
 * @param bytes     access width in bytes (1/2/4/8)
 * @return true on success, false if any LLVM construction step fails
 *         (POP_*/BUILD_* macros jump to `fail` internally)
 */
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
                          AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset,
                          uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    /* maddr = bounds-checked native address of (addr + offset) */
    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Cast maddr to a pointer of the access width and narrow the operand
       to match; the zero-extension below restores the full width. */
    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            /* loader validation guarantees bytes is 1/2/4/8 */
            bh_assert(0);
            break;
    }

    if (!(result =
        LLVMBuildAtomicRMW(comp_ctx->builder,
                           atomic_op, maddr, value,
                           LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    /* volatile: keep the optimizer from eliding/merging the access */
    LLVMSetVolatile(result, true);

    /* atomicrmw yields the OLD memory value at the access width;
       zero-extend it to the wasm operand type and push it. (When the
       widths already match, LLVM's builder folds the zext to a no-op.) */
    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
/**
 * Compile a wasm atomic compare-and-exchange opcode
 * (atomic.rmw.cmpxchg, any access width).
 *
 * Pops the replacement value then the expected value (i32 or i64 per
 * @op_type), bounds- and alignment-checks the effective address, emits an
 * LLVM `cmpxchg` with seq_cst success/failure orderings, and pushes the
 * value previously held in memory, zero-extended to the operand type.
 *
 * @param op_type VALUE_TYPE_I32 or VALUE_TYPE_I64
 * @param align   log2 alignment from the memarg
 * @param offset  static memarg offset
 * @param bytes   access width in bytes (1/2/4/8)
 * @return true on success, false on any LLVM construction failure
 *         (POP_*/BUILD_* macros jump to `fail` internally)
 */
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    /* stack order: [.., expected, replacement] -> pop replacement first */
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Cast the address to the access width and narrow both operands;
       the zero-extension below restores the full width. */
    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            /* loader validation guarantees bytes is 1/2/4/8 */
            bh_assert(0);
            break;
    }

    if (!(result =
        LLVMBuildAtomicCmpXchg(comp_ctx->builder, maddr, expect, value,
                               LLVMAtomicOrderingSequentiallyConsistent,
                               LLVMAtomicOrderingSequentiallyConsistent,
                               false))) {
        goto fail;
    }

    /* volatile: keep the optimizer from eliding/merging the access */
    LLVMSetVolatile(result, true);

    /* cmpxchg returns a { ty, i1 } pair; field 0 is the value that was
       in memory before the operation, which is what wasm pushes. We need
       to extract the previous value from the structure. */
    if (!(result =
        LLVMBuildExtractValue(comp_ctx->builder,
                              result, 0, "previous_value"))) {
        goto fail;
    }

    /* Zero-extend the (possibly sub-word) previous value back to the
       operand type; a same-width zext is folded away by the builder. */
    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
/**
 * Compile memory.atomic.wait32 / wait64.
 *
 * Pops timeout (i64) and the expected value (i32 or i64 per @op_type),
 * bounds- and alignment-checks the address, then emits a call to the
 * runtime helper wasm_runtime_atomic_wait(inst, maddr, expect_i64,
 * timeout, is_wait64). On a positive return the result is pushed;
 * otherwise the generated function returns early (with a zero/void
 * result) so the runtime can surface the pending exception.
 *
 * @param op_type VALUE_TYPE_I32 (wait32) or VALUE_TYPE_I64 (wait64)
 * @return true on success, false on any LLVM construction failure
 */
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    /* `value` and `func_ptr_type` look unused here but are required as
       scratch locals by the GET_AOT_FUNCTION macro below */
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);

    /* The runtime helper always takes the expected value as i64; widen
       the i32 case and pass an is_wait64 flag instead of two helpers. */
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect =
            LLVMBuildZExt(comp_ctx->builder, expect,
                          I64_TYPE, "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* (WASMModuleInstance*, address, expect, timeout, is_wait64) -> i32 */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    /* NOTE(review): success is tested as ret > 0, so a zero return also
       takes the failure path — confirm against wasm_runtime_atomic_wait's
       return-value contract. */
    BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp,
                         wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (aot_func_type->result_count) {
        /* return a dummy zero of the function's declared result type */
        switch (aot_func_type->types[aot_func_type->param_count]) {
            case VALUE_TYPE_I32:
                LLVMBuildRet(comp_ctx->builder, I32_ZERO);
                break;
            case VALUE_TYPE_I64:
                LLVMBuildRet(comp_ctx->builder, I64_ZERO);
                break;
            case VALUE_TYPE_F32:
                LLVMBuildRet(comp_ctx->builder, F32_ZERO);
                break;
            case VALUE_TYPE_F64:
                LLVMBuildRet(comp_ctx->builder, F64_ZERO);
                break;
        }
    }
    else {
        LLVMBuildRetVoid(comp_ctx->builder);
    }

    /* continue code generation on the success path */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    return true;
fail:
    return false;
}
/**
 * Compile memory.atomic.notify.
 *
 * Pops the waiter count (i32), bounds- and alignment-checks the address,
 * emits a call to wasm_runtime_atomic_notify(inst, maddr, count), and
 * pushes the helper's i32 return value (per the wasm threads proposal,
 * the number of waiters woken).
 *
 * Note: spelled aot_compiler_op_* (unlike its aot_compile_op_* siblings);
 * this matches the declaration in the header, so keep them in sync.
 *
 * @return true on success, false on any LLVM construction failure
 */
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint32 align, uint32 offset, uint32 bytes)
{
    /* `value` and `func_ptr_type` are scratch locals required by the
       GET_AOT_FUNCTION macro below */
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* (WASMModuleInstance*, address, count) -> i32 */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);

    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */

View File

@ -7,6 +7,9 @@
#define _AOT_EMIT_MEMORY_H_ #define _AOT_EMIT_MEMORY_H_
#include "aot_compiler.h" #include "aot_compiler.h"
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#endif
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -14,11 +17,13 @@ extern "C" {
bool bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign); uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic);
bool bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign); uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic);
bool bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
@ -30,11 +35,11 @@ aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes); uint32 align, uint32 offset, uint32 bytes, bool atomic);
bool bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes); uint32 align, uint32 offset, uint32 bytes, bool atomic);
bool bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
@ -66,6 +71,31 @@ bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx); aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
#endif #endif
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset,
uint32 bytes);
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes);
#endif
#ifdef __cplusplus #ifdef __cplusplus
} /* end of extern "C" */ } /* end of extern "C" */
#endif #endif

View File

@ -3307,13 +3307,28 @@ wasm_loader_find_block_addr(BlockAddr *block_addr_cache,
default: default:
if (error_buf) if (error_buf)
snprintf(error_buf, error_buf_size, snprintf(error_buf, error_buf_size,
"WASM loader find block addr failed: " "WASM loader find block addr failed: "
"invalid opcode fc %02x.", opcode); "invalid opcode fc %02x.", opcode);
return false; return false;
} }
break; break;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
/* atomic_op (1 u8) + memarg (2 u32_leb) */
opcode = read_uint8(p);
if (opcode != WASM_OP_ATOMIC_FENCE) {
skip_leb_uint32(p, p_end); /* align */
skip_leb_uint32(p, p_end); /* offset */
}
else {
/* atomic.fence doesn't have memarg */
p++;
}
break;
}
#endif
default: default:
if (error_buf) if (error_buf)
snprintf(error_buf, error_buf_size, snprintf(error_buf, error_buf_size,
@ -4796,6 +4811,36 @@ check_memory_access_align(uint8 opcode, uint32 align,
return true; return true;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Validate that an atomic opcode's memarg alignment exactly equals the
 * access's natural alignment (log2), as the threads proposal requires.
 * Covers notify/wait (0x00-0x02) and the atomic load/store/rmw range
 * (0x10-0x4e); reports "alignment isn't equal to natural" on mismatch. */
static bool
check_memory_align_equal(uint8 opcode, uint32 align,
                         char *error_buf, uint32 error_buf_size)
{
    /* natural log2 alignments for notify, wait32, wait64 */
    static const uint8 wait_group[] = {2, 2, 3};
    /* natural log2 alignments for one 7-opcode access group:
       32, 64, 32_8u, 32_16u, 64_8u, 64_16u, 64_32u — every load/store/rmw
       group repeats this same width pattern */
    static const uint8 access_group[] = {2, 3, 0, 1, 0, 1, 2};
    uint8 natural;

    bh_assert((opcode <= WASM_OP_ATOMIC_WAIT64)
              || (opcode >= WASM_OP_ATOMIC_I32_LOAD
                  && opcode <= WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U));

    natural = (opcode <= WASM_OP_ATOMIC_WAIT64)
                  ? wait_group[opcode - WASM_OP_ATOMIC_NOTIFY]
                  : access_group[(opcode - WASM_OP_ATOMIC_I32_LOAD) % 7];

    if (align == natural)
        return true;

    set_error_buf(error_buf, error_buf_size,
                  "alignment isn't equal to natural");
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
static bool static bool
is_value_type(uint8 type) is_value_type(uint8 type)
{ {
@ -6535,6 +6580,147 @@ fail_data_cnt_sec_require:
} }
break; break;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
opcode = read_uint8(p);
#if WASM_ENABLE_FAST_INTERP != 0
emit_byte(loader_ctx, opcode);
#endif
if (opcode != WASM_OP_ATOMIC_FENCE) {
CHECK_MEMORY();
read_leb_uint32(p, p_end, align); /* align */
read_leb_uint32(p, p_end, mem_offset); /* offset */
if (!check_memory_align_equal(opcode, align,
error_buf,
error_buf_size)) {
goto fail;
}
}
switch (opcode) {
case WASM_OP_ATOMIC_NOTIFY:
POP2_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_WAIT32:
POP_I64();
POP_I32();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_WAIT64:
POP_I64();
POP_I64();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_FENCE:
/* reserved byte 0x00 */
if (*p++ != 0x00) {
set_error_buf(error_buf, error_buf_size,
"WASM loader prepare bytecode failed: "
"zero flag expected");
goto fail;
}
break;
case WASM_OP_ATOMIC_I32_LOAD:
case WASM_OP_ATOMIC_I32_LOAD8_U:
case WASM_OP_ATOMIC_I32_LOAD16_U:
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_I32_STORE:
case WASM_OP_ATOMIC_I32_STORE8:
case WASM_OP_ATOMIC_I32_STORE16:
POP_I32();
POP_I32();
break;
case WASM_OP_ATOMIC_I64_LOAD:
case WASM_OP_ATOMIC_I64_LOAD8_U:
case WASM_OP_ATOMIC_I64_LOAD16_U:
case WASM_OP_ATOMIC_I64_LOAD32_U:
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I64);
break;
case WASM_OP_ATOMIC_I64_STORE:
case WASM_OP_ATOMIC_I64_STORE8:
case WASM_OP_ATOMIC_I64_STORE16:
case WASM_OP_ATOMIC_I64_STORE32:
POP_I64();
POP_I32();
break;
case WASM_OP_ATOMIC_RMW_I32_ADD:
case WASM_OP_ATOMIC_RMW_I32_ADD8_U:
case WASM_OP_ATOMIC_RMW_I32_ADD16_U:
case WASM_OP_ATOMIC_RMW_I32_SUB:
case WASM_OP_ATOMIC_RMW_I32_SUB8_U:
case WASM_OP_ATOMIC_RMW_I32_SUB16_U:
case WASM_OP_ATOMIC_RMW_I32_AND:
case WASM_OP_ATOMIC_RMW_I32_AND8_U:
case WASM_OP_ATOMIC_RMW_I32_AND16_U:
case WASM_OP_ATOMIC_RMW_I32_OR:
case WASM_OP_ATOMIC_RMW_I32_OR8_U:
case WASM_OP_ATOMIC_RMW_I32_OR16_U:
case WASM_OP_ATOMIC_RMW_I32_XOR:
case WASM_OP_ATOMIC_RMW_I32_XOR8_U:
case WASM_OP_ATOMIC_RMW_I32_XOR16_U:
case WASM_OP_ATOMIC_RMW_I32_XCHG:
case WASM_OP_ATOMIC_RMW_I32_XCHG8_U:
case WASM_OP_ATOMIC_RMW_I32_XCHG16_U:
POP2_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_RMW_I64_ADD:
case WASM_OP_ATOMIC_RMW_I64_ADD8_U:
case WASM_OP_ATOMIC_RMW_I64_ADD16_U:
case WASM_OP_ATOMIC_RMW_I64_ADD32_U:
case WASM_OP_ATOMIC_RMW_I64_SUB:
case WASM_OP_ATOMIC_RMW_I64_SUB8_U:
case WASM_OP_ATOMIC_RMW_I64_SUB16_U:
case WASM_OP_ATOMIC_RMW_I64_SUB32_U:
case WASM_OP_ATOMIC_RMW_I64_AND:
case WASM_OP_ATOMIC_RMW_I64_AND8_U:
case WASM_OP_ATOMIC_RMW_I64_AND16_U:
case WASM_OP_ATOMIC_RMW_I64_AND32_U:
case WASM_OP_ATOMIC_RMW_I64_OR:
case WASM_OP_ATOMIC_RMW_I64_OR8_U:
case WASM_OP_ATOMIC_RMW_I64_OR16_U:
case WASM_OP_ATOMIC_RMW_I64_OR32_U:
case WASM_OP_ATOMIC_RMW_I64_XOR:
case WASM_OP_ATOMIC_RMW_I64_XOR8_U:
case WASM_OP_ATOMIC_RMW_I64_XOR16_U:
case WASM_OP_ATOMIC_RMW_I64_XOR32_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG:
case WASM_OP_ATOMIC_RMW_I64_XCHG8_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG16_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG32_U:
POP_I64();
POP_I32();
PUSH_I64();
break;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG:
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U:
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U:
POP_I32();
POP_I32();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U:
POP_I64();
POP_I64();
POP_I32();
PUSH_I64();
break;
default:
if (error_buf != NULL)
snprintf(error_buf, error_buf_size,
"WASM module load failed: "
"invalid opcode 0xfe %02x.", opcode);
goto fail;
}
break;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
default: default:
if (error_buf != NULL) if (error_buf != NULL)
snprintf(error_buf, error_buf_size, snprintf(error_buf, error_buf_size,
@ -6566,7 +6752,7 @@ fail_data_cnt_sec_require:
goto fail; goto fail;
} }
func_const_end = func->consts + func->const_cell_num * 4; func_const_end = func->consts + func->const_cell_num * 4;
// reverse the const buf /* reverse the const buf */
for (int i = loader_ctx->num_const - 1; i >= 0; i--) { for (int i = loader_ctx->num_const - 1; i >= 0; i--) {
Const *c = (Const*)(loader_ctx->const_buf + i * sizeof(Const)); Const *c = (Const*)(loader_ctx->const_buf + i * sizeof(Const));
if (c->value_type == VALUE_TYPE_F64 if (c->value_type == VALUE_TYPE_F64

View File

@ -2331,6 +2331,23 @@ wasm_loader_find_block_addr(BlockAddr *block_addr_cache,
break; break;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
/* atomic_op (1 u8) + memarg (2 u32_leb) */
opcode = read_uint8(p);
if (opcode != WASM_OP_ATOMIC_FENCE) {
skip_leb_uint32(p, p_end); /* align */
skip_leb_uint32(p, p_end); /* offset */
}
else {
/* atomic.fence doesn't have memarg */
p++;
}
break;
}
#endif
default: default:
bh_assert(0); bh_assert(0);
break; break;
@ -4953,6 +4970,8 @@ handle_op_block_and_loop:
bh_assert(*p == 0x00); bh_assert(*p == 0x00);
p++; p++;
PUSH_I32(); PUSH_I32();
module->possible_memory_grow = true;
break; break;
case WASM_OP_MEMORY_GROW: case WASM_OP_MEMORY_GROW:
@ -5318,6 +5337,136 @@ handle_op_block_and_loop:
break; break;
} }
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
opcode = read_uint8(p);
#if WASM_ENABLE_FAST_INTERP != 0
emit_byte(loader_ctx, opcode);
#endif
if (opcode != WASM_OP_ATOMIC_FENCE) {
CHECK_MEMORY();
read_leb_uint32(p, p_end, align); /* align */
read_leb_uint32(p, p_end, mem_offset); /* offset */
}
switch (opcode) {
case WASM_OP_ATOMIC_NOTIFY:
POP2_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_WAIT32:
POP_I64();
POP_I32();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_WAIT64:
POP_I64();
POP_I64();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_FENCE:
/* reserved byte 0x00 */
bh_assert(*p == 0x00);
p++;
break;
case WASM_OP_ATOMIC_I32_LOAD:
case WASM_OP_ATOMIC_I32_LOAD8_U:
case WASM_OP_ATOMIC_I32_LOAD16_U:
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_I32_STORE:
case WASM_OP_ATOMIC_I32_STORE8:
case WASM_OP_ATOMIC_I32_STORE16:
POP_I32();
POP_I32();
break;
case WASM_OP_ATOMIC_I64_LOAD:
case WASM_OP_ATOMIC_I64_LOAD8_U:
case WASM_OP_ATOMIC_I64_LOAD16_U:
case WASM_OP_ATOMIC_I64_LOAD32_U:
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I64);
break;
case WASM_OP_ATOMIC_I64_STORE:
case WASM_OP_ATOMIC_I64_STORE8:
case WASM_OP_ATOMIC_I64_STORE16:
case WASM_OP_ATOMIC_I64_STORE32:
POP_I64();
POP_I32();
break;
case WASM_OP_ATOMIC_RMW_I32_ADD:
case WASM_OP_ATOMIC_RMW_I32_ADD8_U:
case WASM_OP_ATOMIC_RMW_I32_ADD16_U:
case WASM_OP_ATOMIC_RMW_I32_SUB:
case WASM_OP_ATOMIC_RMW_I32_SUB8_U:
case WASM_OP_ATOMIC_RMW_I32_SUB16_U:
case WASM_OP_ATOMIC_RMW_I32_AND:
case WASM_OP_ATOMIC_RMW_I32_AND8_U:
case WASM_OP_ATOMIC_RMW_I32_AND16_U:
case WASM_OP_ATOMIC_RMW_I32_OR:
case WASM_OP_ATOMIC_RMW_I32_OR8_U:
case WASM_OP_ATOMIC_RMW_I32_OR16_U:
case WASM_OP_ATOMIC_RMW_I32_XOR:
case WASM_OP_ATOMIC_RMW_I32_XOR8_U:
case WASM_OP_ATOMIC_RMW_I32_XOR16_U:
case WASM_OP_ATOMIC_RMW_I32_XCHG:
case WASM_OP_ATOMIC_RMW_I32_XCHG8_U:
case WASM_OP_ATOMIC_RMW_I32_XCHG16_U:
POP2_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_I32);
break;
case WASM_OP_ATOMIC_RMW_I64_ADD:
case WASM_OP_ATOMIC_RMW_I64_ADD8_U:
case WASM_OP_ATOMIC_RMW_I64_ADD16_U:
case WASM_OP_ATOMIC_RMW_I64_ADD32_U:
case WASM_OP_ATOMIC_RMW_I64_SUB:
case WASM_OP_ATOMIC_RMW_I64_SUB8_U:
case WASM_OP_ATOMIC_RMW_I64_SUB16_U:
case WASM_OP_ATOMIC_RMW_I64_SUB32_U:
case WASM_OP_ATOMIC_RMW_I64_AND:
case WASM_OP_ATOMIC_RMW_I64_AND8_U:
case WASM_OP_ATOMIC_RMW_I64_AND16_U:
case WASM_OP_ATOMIC_RMW_I64_AND32_U:
case WASM_OP_ATOMIC_RMW_I64_OR:
case WASM_OP_ATOMIC_RMW_I64_OR8_U:
case WASM_OP_ATOMIC_RMW_I64_OR16_U:
case WASM_OP_ATOMIC_RMW_I64_OR32_U:
case WASM_OP_ATOMIC_RMW_I64_XOR:
case WASM_OP_ATOMIC_RMW_I64_XOR8_U:
case WASM_OP_ATOMIC_RMW_I64_XOR16_U:
case WASM_OP_ATOMIC_RMW_I64_XOR32_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG:
case WASM_OP_ATOMIC_RMW_I64_XCHG8_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG16_U:
case WASM_OP_ATOMIC_RMW_I64_XCHG32_U:
POP_I64();
POP_I32();
PUSH_I64();
break;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG:
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U:
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U:
POP_I32();
POP_I32();
POP_I32();
PUSH_I32();
break;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U:
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U:
POP_I64();
POP_I64();
POP_I32();
PUSH_I64();
break;
default:
bh_assert(0);
break;
}
break;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
default: default:
bh_assert(0); bh_assert(0);
break; break;

View File

@ -263,6 +263,7 @@ typedef enum WASMOpcode {
/* Post-MVP extend op prefix */ /* Post-MVP extend op prefix */
WASM_OP_MISC_PREFIX = 0xfc, WASM_OP_MISC_PREFIX = 0xfc,
WASM_OP_ATOMIC_PREFIX = 0xfe,
} WASMOpcode; } WASMOpcode;
typedef enum WASMMiscEXTOpcode { typedef enum WASMMiscEXTOpcode {
@ -285,6 +286,85 @@ typedef enum WASMMiscEXTOpcode {
#endif #endif
} WASMMiscEXTOpcode; } WASMMiscEXTOpcode;
/* Sub-opcodes that follow the 0xfe WASM_OP_ATOMIC_PREFIX byte, as laid
 * out by the wasm threads proposal. 0x04-0x0f are unassigned; from
 * 0x10 onward, each load/store/rmw family repeats the same 7-wide width
 * pattern (i32, i64, i32_8u, i32_16u, i64_8u, i64_16u, i64_32u), which
 * the loader's alignment check relies on. */
typedef enum WASMAtomicEXTOpcode {
    /* atomic wait and notify */
    WASM_OP_ATOMIC_NOTIFY               = 0x00,
    WASM_OP_ATOMIC_WAIT32               = 0x01,
    WASM_OP_ATOMIC_WAIT64               = 0x02,
    WASM_OP_ATOMIC_FENCE                = 0x03, /* no memarg, one reserved byte */
    /* atomic load and store */
    WASM_OP_ATOMIC_I32_LOAD             = 0x10,
    WASM_OP_ATOMIC_I64_LOAD             = 0x11,
    WASM_OP_ATOMIC_I32_LOAD8_U          = 0x12,
    WASM_OP_ATOMIC_I32_LOAD16_U         = 0x13,
    WASM_OP_ATOMIC_I64_LOAD8_U          = 0x14,
    WASM_OP_ATOMIC_I64_LOAD16_U         = 0x15,
    WASM_OP_ATOMIC_I64_LOAD32_U         = 0x16,
    WASM_OP_ATOMIC_I32_STORE            = 0x17,
    WASM_OP_ATOMIC_I64_STORE            = 0x18,
    WASM_OP_ATOMIC_I32_STORE8           = 0x19,
    WASM_OP_ATOMIC_I32_STORE16          = 0x1a,
    WASM_OP_ATOMIC_I64_STORE8           = 0x1b,
    WASM_OP_ATOMIC_I64_STORE16          = 0x1c,
    WASM_OP_ATOMIC_I64_STORE32          = 0x1d,
    /* atomic add */
    WASM_OP_ATOMIC_RMW_I32_ADD          = 0x1e,
    WASM_OP_ATOMIC_RMW_I64_ADD          = 0x1f,
    WASM_OP_ATOMIC_RMW_I32_ADD8_U       = 0x20,
    WASM_OP_ATOMIC_RMW_I32_ADD16_U      = 0x21,
    WASM_OP_ATOMIC_RMW_I64_ADD8_U       = 0x22,
    WASM_OP_ATOMIC_RMW_I64_ADD16_U      = 0x23,
    WASM_OP_ATOMIC_RMW_I64_ADD32_U      = 0x24,
    /* atomic sub */
    WASM_OP_ATOMIC_RMW_I32_SUB          = 0x25,
    WASM_OP_ATOMIC_RMW_I64_SUB          = 0x26,
    WASM_OP_ATOMIC_RMW_I32_SUB8_U       = 0x27,
    WASM_OP_ATOMIC_RMW_I32_SUB16_U      = 0x28,
    WASM_OP_ATOMIC_RMW_I64_SUB8_U       = 0x29,
    WASM_OP_ATOMIC_RMW_I64_SUB16_U      = 0x2a,
    WASM_OP_ATOMIC_RMW_I64_SUB32_U      = 0x2b,
    /* atomic and */
    WASM_OP_ATOMIC_RMW_I32_AND          = 0x2c,
    WASM_OP_ATOMIC_RMW_I64_AND          = 0x2d,
    WASM_OP_ATOMIC_RMW_I32_AND8_U       = 0x2e,
    WASM_OP_ATOMIC_RMW_I32_AND16_U      = 0x2f,
    WASM_OP_ATOMIC_RMW_I64_AND8_U       = 0x30,
    WASM_OP_ATOMIC_RMW_I64_AND16_U      = 0x31,
    WASM_OP_ATOMIC_RMW_I64_AND32_U      = 0x32,
    /* atomic or */
    WASM_OP_ATOMIC_RMW_I32_OR           = 0x33,
    WASM_OP_ATOMIC_RMW_I64_OR           = 0x34,
    WASM_OP_ATOMIC_RMW_I32_OR8_U        = 0x35,
    WASM_OP_ATOMIC_RMW_I32_OR16_U       = 0x36,
    WASM_OP_ATOMIC_RMW_I64_OR8_U        = 0x37,
    WASM_OP_ATOMIC_RMW_I64_OR16_U       = 0x38,
    WASM_OP_ATOMIC_RMW_I64_OR32_U       = 0x39,
    /* atomic xor */
    WASM_OP_ATOMIC_RMW_I32_XOR          = 0x3a,
    WASM_OP_ATOMIC_RMW_I64_XOR          = 0x3b,
    WASM_OP_ATOMIC_RMW_I32_XOR8_U       = 0x3c,
    WASM_OP_ATOMIC_RMW_I32_XOR16_U      = 0x3d,
    WASM_OP_ATOMIC_RMW_I64_XOR8_U       = 0x3e,
    WASM_OP_ATOMIC_RMW_I64_XOR16_U      = 0x3f,
    WASM_OP_ATOMIC_RMW_I64_XOR32_U      = 0x40,
    /* atomic xchg */
    WASM_OP_ATOMIC_RMW_I32_XCHG         = 0x41,
    WASM_OP_ATOMIC_RMW_I64_XCHG         = 0x42,
    WASM_OP_ATOMIC_RMW_I32_XCHG8_U      = 0x43,
    WASM_OP_ATOMIC_RMW_I32_XCHG16_U     = 0x44,
    WASM_OP_ATOMIC_RMW_I64_XCHG8_U      = 0x45,
    WASM_OP_ATOMIC_RMW_I64_XCHG16_U     = 0x46,
    WASM_OP_ATOMIC_RMW_I64_XCHG32_U     = 0x47,
    /* atomic cmpxchg */
    WASM_OP_ATOMIC_RMW_I32_CMPXCHG      = 0x48,
    WASM_OP_ATOMIC_RMW_I64_CMPXCHG      = 0x49,
    WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U   = 0x4a,
    WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U  = 0x4b,
    WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U   = 0x4c,
    WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U  = 0x4d,
    WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U  = 0x4e,
} WASMAtomicEXTOpcode;
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -1,59 +0,0 @@
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

/**
 * The byte array buffer is the file content of a test wasm binary file,
 * which is compiled by emcc or clang toolchain from C source file of:
 * core/iwasm/app-samples/hello-world/main.c.
 *
 * The buffer begins with the wasm module header: magic "\0asm"
 * (0x00 0x61 0x73 0x6D) followed by version 1. Do not edit the bytes by
 * hand; regenerate from the source file instead.
 */
unsigned char wasm_test_file[] = { 0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00,
    0x00, 0x00, 0x0D, 0x06, 0x64, 0x79, 0x6C, 0x69, 0x6E, 0x6B, 0xC0, 0x80,
    0x04, 0x04, 0x00, 0x00, 0x01, 0x13, 0x04, 0x60, 0x01, 0x7F, 0x00, 0x60,
    0x01, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x00,
    0x00, 0x02, 0x58, 0x06, 0x03, 0x65, 0x6E, 0x76, 0x05, 0x5F, 0x66, 0x72,
    0x65, 0x65, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x07, 0x5F, 0x6D, 0x61,
    0x6C, 0x6C, 0x6F, 0x63, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76, 0x07, 0x5F,
    0x70, 0x72, 0x69, 0x6E, 0x74, 0x66, 0x00, 0x02, 0x03, 0x65, 0x6E, 0x76,
    0x05, 0x5F, 0x70, 0x75, 0x74, 0x73, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76,
    0x0D, 0x5F, 0x5F, 0x6D, 0x65, 0x6D, 0x6F, 0x72, 0x79, 0x5F, 0x62, 0x61,
    0x73, 0x65, 0x03, 0x7F, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x06, 0x6D, 0x65,
    0x6D, 0x6F, 0x72, 0x79, 0x02, 0x00, 0x01, 0x03, 0x04, 0x03, 0x02, 0x03,
    0x03, 0x06, 0x10, 0x03, 0x7F, 0x01, 0x41, 0x00, 0x0B, 0x7F, 0x01, 0x41,
    0x00, 0x0B, 0x7F, 0x00, 0x41, 0x1B, 0x0B, 0x07, 0x33, 0x04, 0x12, 0x5F,
    0x5F, 0x70, 0x6F, 0x73, 0x74, 0x5F, 0x69, 0x6E, 0x73, 0x74, 0x61, 0x6E,
    0x74, 0x69, 0x61, 0x74, 0x65, 0x00, 0x06, 0x05, 0x5F, 0x6D, 0x61, 0x69,
    0x6E, 0x00, 0x04, 0x0B, 0x72, 0x75, 0x6E, 0x50, 0x6F, 0x73, 0x74, 0x53,
    0x65, 0x74, 0x73, 0x00, 0x05, 0x04, 0x5F, 0x73, 0x74, 0x72, 0x03, 0x03,
    0x0A, 0xBA, 0x01, 0x03, 0x9E, 0x01, 0x01, 0x01, 0x7F, 0x23, 0x01, 0x21,
    0x00, 0x23, 0x01, 0x41, 0x10, 0x6A, 0x24, 0x01, 0x20, 0x00, 0x41, 0x08,
    0x6A, 0x21, 0x02, 0x23, 0x00, 0x41, 0x1B, 0x6A, 0x10, 0x03, 0x1A, 0x41,
    0x80, 0x08, 0x10, 0x01, 0x21, 0x01, 0x20, 0x01, 0x04, 0x7F, 0x20, 0x00,
    0x20, 0x01, 0x36, 0x02, 0x00, 0x23, 0x00, 0x20, 0x00, 0x10, 0x02, 0x1A,
    0x20, 0x01, 0x23, 0x00, 0x2C, 0x00, 0x0D, 0x3A, 0x00, 0x00, 0x20, 0x01,
    0x23, 0x00, 0x2C, 0x00, 0x0E, 0x3A, 0x00, 0x01, 0x20, 0x01, 0x23, 0x00,
    0x2C, 0x00, 0x0F, 0x3A, 0x00, 0x02, 0x20, 0x01, 0x23, 0x00, 0x2C, 0x00,
    0x10, 0x3A, 0x00, 0x03, 0x20, 0x01, 0x23, 0x00, 0x2C, 0x00, 0x11, 0x3A,
    0x00, 0x04, 0x20, 0x01, 0x23, 0x00, 0x2C, 0x00, 0x12, 0x3A, 0x00, 0x05,
    0x20, 0x02, 0x20, 0x01, 0x36, 0x02, 0x00, 0x23, 0x00, 0x41, 0x13, 0x6A,
    0x20, 0x02, 0x10, 0x02, 0x1A, 0x20, 0x01, 0x10, 0x00, 0x20, 0x00, 0x24,
    0x01, 0x41, 0x00, 0x05, 0x23, 0x00, 0x41, 0x28, 0x6A, 0x10, 0x03, 0x1A,
    0x20, 0x00, 0x24, 0x01, 0x41, 0x7F, 0x0B, 0x0B, 0x03, 0x00, 0x01, 0x0B,
    0x14, 0x00, 0x23, 0x00, 0x41, 0x40, 0x6B, 0x24, 0x01, 0x23, 0x01, 0x41,
    0x80, 0x80, 0x04, 0x6A, 0x24, 0x02, 0x10, 0x05, 0x0B, 0x0B, 0x3F, 0x01,
    0x00, 0x23, 0x00, 0x0B, 0x39, 0x62, 0x75, 0x66, 0x20, 0x70, 0x74, 0x72,
    0x3A, 0x20, 0x25, 0x70, 0x0A, 0x00, 0x31, 0x32, 0x33, 0x34, 0x0A, 0x00,
    0x62, 0x75, 0x66, 0x3A, 0x20, 0x25, 0x73, 0x00, 0x48, 0x65, 0x6C, 0x6C,
    0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x00, 0x6D, 0x61, 0x6C,
    0x6C, 0x6F, 0x63, 0x20, 0x62, 0x75, 0x66, 0x20, 0x66, 0x61, 0x69, 0x6C,
    0x65, 0x64, 0x00, 0x50, 0x04, 0x6E, 0x61, 0x6D, 0x65, 0x01, 0x49, 0x07,
    0x00, 0x05, 0x5F, 0x66, 0x72, 0x65, 0x65, 0x01, 0x07, 0x5F, 0x6D, 0x61,
    0x6C, 0x6C, 0x6F, 0x63, 0x02, 0x07, 0x5F, 0x70, 0x72, 0x69, 0x6E, 0x74,
    0x66, 0x03, 0x05, 0x5F, 0x70, 0x75, 0x74, 0x73, 0x04, 0x05, 0x5F, 0x6D,
    0x61, 0x69, 0x6E, 0x05, 0x0B, 0x72, 0x75, 0x6E, 0x50, 0x6F, 0x73, 0x74,
    0x53, 0x65, 0x74, 0x73, 0x06, 0x12, 0x5F, 0x5F, 0x70, 0x6F, 0x73, 0x74,
    0x5F, 0x69, 0x6E, 0x73, 0x74, 0x61, 0x6E, 0x74, 0x69, 0x61, 0x74, 0x65,
    0x00, 0x20, 0x10, 0x73, 0x6F, 0x75, 0x72, 0x63, 0x65, 0x4D, 0x61, 0x70,
    0x70, 0x69, 0x6E, 0x67, 0x55, 0x52, 0x4C, 0x0E, 0x61, 0x2E, 0x6F, 0x75,
    0x74, 0x2E, 0x77, 0x61, 0x73, 0x6D, 0x2E, 0x6D, 0x61, 0x70 };