Merge pull request #1392 from bytecodealliance/main

Merge main into dev/socket
This commit is contained in:
Wenyong Huang 2022-08-20 12:31:10 +08:00 committed by GitHub
commit 0e17ab2f75
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
209 changed files with 26189 additions and 1212 deletions

.devcontainer/Dockerfile (new file, 98 lines)
View File

@ -0,0 +1,98 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.195.0/containers/cpp/.devcontainer/base.Dockerfile
# [Choice] Debian / Ubuntu version (use Debian 11/9, Ubuntu 18.04/21.04 on local arm64/Apple Silicon): debian-11, debian-10, debian-9, ubuntu-21.04, ubuntu-20.04, ubuntu-18.04
ARG VARIANT=ubuntu-20.04
FROM mcr.microsoft.com/vscode/devcontainers/cpp:0-${VARIANT}
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
RUN apt update \
&& apt install -y apt-transport-https apt-utils build-essential \
ca-certificates curl g++-multilib git gnupg \
libgcc-9-dev lib32gcc-9-dev lsb-release \
ninja-build ocaml ocamlbuild python2.7 \
software-properties-common tree tzdata \
unzip valgrind vim wget zip
#
# CMAKE (https://apt.kitware.com/)
RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /usr/share/keyrings/kitware-archive-keyring.gpg > /dev/null \
&& echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ bionic main' | tee /etc/apt/sources.list.d/kitware.list >/dev/null \
&& apt update \
&& rm /usr/share/keyrings/kitware-archive-keyring.gpg \
&& apt install -y kitware-archive-keyring \
&& apt install -y cmake
#
# install emsdk
RUN cd /opt \
&& git clone https://github.com/emscripten-core/emsdk.git
RUN cd /opt/emsdk \
&& git pull \
&& ./emsdk install 2.0.26 \
&& ./emsdk activate 2.0.26 \
&& echo "source /opt/emsdk/emsdk_env.sh" >> /root/.bashrc
#
# install wasi-sdk
ARG WASI_SDK_VER=16
RUN wget -c https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VER}/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -P /opt
RUN tar xf /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -C /opt \
&& ln -fs /opt/wasi-sdk-${WASI_SDK_VER}.0 /opt/wasi-sdk
RUN rm /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz
#
#install wabt
ARG WABT_VER=1.0.29
RUN wget -c https://github.com/WebAssembly/wabt/releases/download/${WABT_VER}/wabt-${WABT_VER}-ubuntu.tar.gz -P /opt
RUN tar xf /opt/wabt-${WABT_VER}-ubuntu.tar.gz -C /opt \
&& ln -fs /opt/wabt-${WABT_VER} /opt/wabt
RUN rm /opt/wabt-${WABT_VER}-ubuntu.tar.gz
#
# install bazelisk
ARG BAZELISK_VER=1.12.0
RUN mkdir /opt/bazelisk
RUN wget -c https://github.com/bazelbuild/bazelisk/releases/download/v${BAZELISK_VER}/bazelisk-linux-amd64 -P /opt/bazelisk
RUN chmod a+x /opt/bazelisk/bazelisk-linux-amd64 \
&& ln -fs /opt/bazelisk/bazelisk-linux-amd64 /opt/bazelisk/bazel
#
# install clang+llvm
RUN cd /etc/apt/apt.conf.d \
&& touch 99verify-peer.conf \
&& echo "Acquire { https::Verify-Peer false }" > 99verify-peer.conf
RUN cd /tmp \
&& wget https://apt.llvm.org/llvm.sh \
&& chmod a+x ./llvm.sh
RUN /tmp/llvm.sh 12 all
RUN ln -sf /usr/bin/clang-format-12 /usr/bin/clang-format
#
# [Optional]
#
# Install pip
RUN apt update && apt install -y --reinstall python3-venv python3-pip
RUN python3 -m pip install --upgrade pip
#
# Install required python packages
RUN pip3 install --user black nose pycparser pylint
# set path
ENV PATH "/opt/bazelisk:/opt/clang-llvm/bin:${PATH}"
RUN echo "export PATH=/opt/bazelisk:/opt/clang-llvm/bin:${PATH}" >> /root/.bashrc
#
# PS
RUN echo "PS1='\n[ \u@wamr-dev-docker \W ]\n$ '" >> /root/.bashrc
# Clean up
RUN apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /tmp/*

View File

@ -0,0 +1,45 @@
// Copyright (C) 2019 Intel Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.195.0/containers/cpp
{
"name": "WAMR-Dev",
"build": {
"dockerfile": "Dockerfile",
// Update 'VARIANT' to pick a Debian / Ubuntu OS version: debian-11, debian-10, debian-9, ubuntu-21.04, ubuntu-20.04, ubuntu-18.04
// Use Debian 11, Debian 9, Ubuntu 18.04 or Ubuntu 21.04 on local arm64/Apple Silicon
"args": {
"VARIANT": "ubuntu-20.04"
}
},
"runArgs": [
"--cap-add=SYS_PTRACE",
"--security-opt",
"seccomp=unconfined"
],
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {},
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"dtsvet.vscode-wasm",
"esbenp.prettier-vscode",
"ms-python.python",
"ms-python.vscode-pylance",
"ms-vscode.cmake-tools",
"ms-vscode.cpptools",
"twxs.cmake"
]
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "curl https://sh.rustup.rs -sSf | bash -s -- -y",
// Comment out this line to run as root instead.
"remoteUser": "vscode"
}

View File

@ -1,7 +1,7 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: compilation on android, ubuntu-18.04, ubuntu-20.04
name: compilation on android, ubuntu-20.04, ubuntu-22.04
on:
# will be triggered on PR events
@ -52,7 +52,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
steps:
- name: Cancel Workflow Action
uses: styfle/cancel-workflow-action@0.9.1
@ -67,14 +67,14 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
outputs:
traffic_light_on_ubuntu_1804: ${{ steps.do_check_on_ubuntu_1804.outputs.light }}
traffic_light_on_ubuntu_2004: ${{ steps.do_check_on_ubuntu_2004.outputs.light }}
traffic_light_on_ubuntu_2204: ${{ steps.do_check_on_ubuntu_2204.outputs.light }}
steps:
- name: do_check_on_ubuntu_1804
id: do_check_on_ubuntu_1804
if: ${{ matrix.os == 'ubuntu-18.04' }}
- name: do_check_on_ubuntu_2004
id: do_check_on_ubuntu_2004
if: ${{ matrix.os == 'ubuntu-20.04' }}
run: |
if [[ ${{ github.repository }} == */wasm-micro-runtime ]]; then
echo "::set-output name=light::green"
@ -82,9 +82,9 @@ jobs:
echo "::set-output name=light::red"
fi
- name: do_check_on_ubuntu_2004
id: do_check_on_ubuntu_2004
if: ${{ matrix.os == 'ubuntu-20.04' }}
- name: do_check_on_ubuntu_2204
id: do_check_on_ubuntu_2204
if: ${{ matrix.os == 'ubuntu-22.04' }}
run: |
if [[ ${{ github.repository }} == */wasm-micro-runtime ]]; then
echo "::set-output name=light::green"
@ -97,12 +97,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
include:
- os: ubuntu-18.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_1804 }}
- os: ubuntu-20.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2004 }}
- os: ubuntu-22.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2204 }}
steps:
- name: light status
run: echo "matrix.os=${{ matrix.os }}, light=${{ matrix.light }}"
@ -135,12 +135,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
include:
- os: ubuntu-18.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_1804 }}
- os: ubuntu-20.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2004 }}
- os: ubuntu-22.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2204 }}
steps:
- name: light status
run: echo "matrix.os=${{ matrix.os }}, light=${{ matrix.light }}"
@ -204,7 +204,7 @@ jobs:
"-DWAMR_BUILD_TAIL_CALL=1",
"-DWAMR_DISABLE_HW_BOUND_CHECK=1",
]
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
platform: [android, linux]
exclude:
# incompatible feature and platform
@ -248,10 +248,10 @@ jobs:
- make_options_run_mode: $MC_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
include:
- os: ubuntu-18.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_1804 }}
- os: ubuntu-20.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2004 }}
- os: ubuntu-22.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2204 }}
steps:
- name: light status
run: echo "matrix.os=${{ matrix.os }}, light=${{ matrix.light }}"
@ -299,16 +299,16 @@ jobs:
$MC_JIT_BUILD_OPTIONS,
$AOT_BUILD_OPTIONS,
]
os: [ubuntu-18.04, ubuntu-20.04]
os: [ubuntu-20.04, ubuntu-22.04]
include:
- os: ubuntu-18.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_1804 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
- os: ubuntu-20.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2004 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
- os: ubuntu-22.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2204 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
steps:
- name: light status
run: echo "matrix.os=${{ matrix.os }}, light=${{ matrix.light }}"
@ -374,14 +374,14 @@ jobs:
strategy:
matrix:
include:
- os: ubuntu-18.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_1804 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
- os: ubuntu-20.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2004 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
- os: ubuntu-22.04
light: ${{ needs.check_repo.outputs.traffic_light_on_ubuntu_2204 }}
wasi_sdk_release: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wabt_release: https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz
steps:
- name: light status
run: echo "matrix.os=${{ matrix.os }}, light=${{ matrix.light }}"
@ -454,7 +454,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
test_option: [$DEFAULT_TEST_OPTIONS, $SIMD_TEST_OPTIONS]
test_option: [$DEFAULT_TEST_OPTIONS]
steps:
- name: checkout
uses: actions/checkout@v3
@ -488,7 +488,13 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
test_option: [$MULTI_MODULES_TEST_OPTIONS, $THREADS_TEST_OPTIONS]
running_mode: ["classic-interp", "fast-interp", "jit", "aot"]
test_option:
[
$MULTI_MODULES_TEST_OPTIONS,
$SIMD_TEST_OPTIONS,
$THREADS_TEST_OPTIONS,
]
steps:
- name: checkout
uses: actions/checkout@v3
@ -513,7 +519,7 @@ jobs:
run: sudo apt install -y ninja-build
- name: run spec tests
run: ./test_wamr.sh ${{ matrix.test_option }}
run: ./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites
spec_test_x86_32:
@ -522,6 +528,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
running_mode: ["classic-interp", "fast-interp", "jit", "aot"]
test_option: [$DEFAULT_TEST_OPTIONS, $THREADS_TEST_OPTIONS]
steps:
- name: checkout
@ -553,5 +560,5 @@ jobs:
sudo apt install -y g++-multilib lib32gcc-9-dev ninja-build
- name: run spec tests
run: ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }}
run: ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites

.gitignore (2 lines added)
View File

@ -26,3 +26,5 @@ tests/wamr-test-suites/workspace
!/test-tools/wamr-ide/VSCode-Extension/.vscode
samples/socket-api/wasm-src/inc/pthread.h
**/__pycache__

View File

@ -13,6 +13,8 @@ WAMR project reused some components from other open source project:
- **WebAssembly debugging patch for LLDB**: for extending the ability of LLDB to support wasm debugging
- **libuv**: for the WASI Libc with uvwasi implementation
- **uvwasi**: for the WASI Libc with uvwasi implementation
- **asmjit**: for the Fast JIT x86-64 codegen implementation
- **zydis**: for the Fast JIT x86-64 codegen implementation
The WAMR fast interpreter is a clean-room development. We acknowledge the inspiration from the [WASM3](https://github.com/wasm3/wasm3) open source project for its approach of pre-calculating operand stack locations.
@ -29,6 +31,8 @@ The WAMR fast interpreter is a clean room development. We would acknowledge the
| WebAssembly debugging patch for LLDB | unspecified | unspecified | https://reviews.llvm.org/D78801 | |
| libuv | v1.42.0 | v1.44.1 | https://github.com/libuv/libuv | https://www.cvedetails.com/vendor/15402/Libuv-Project.html |
| uvwasi | unspecified | v0.0.12 | https://github.com/nodejs/uvwasi | |
| asmjit | unspecified | unspecified | https://github.com/asmjit/asmjit | |
| zydis | unspecified | e14a07895136182a5b53e181eec3b1c6e0b434de | https://github.com/zyantific/zydis | |
## Licenses
@ -79,3 +83,9 @@ The WAMR fast interpreter is a clean room development. We would acknowledge the
### uvwasi
[LICENSE](./core/iwasm/libraries/libc-uvwasi/LICENSE_UVWASI)
### asmjit
[LICENSE](./core/iwasm/fast-jit/cg/LICENSE_ASMJIT)
### zydis
[LICENSE](./core/iwasm/fast-jit/cg/LICENSE_ZYDIS)

View File

@ -1,10 +1,11 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required (VERSION 2.8...3.16)
cmake_minimum_required (VERSION 2.9)
project (iwasm)
# set (CMAKE_VERBOSE_MAKEFILE 1)
set (CMAKE_VERBOSE_MAKEFILE OFF)
string (TOLOWER ${CMAKE_HOST_SYSTEM_NAME} WAMR_BUILD_PLATFORM)
@ -52,6 +53,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
set (WAMR_BUILD_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Disable Fast JIT by default
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1)
@ -68,7 +74,7 @@ if (NOT DEFINED WAMR_BUILD_FAST_INTERP)
endif ()
if (NOT DEFINED WAMR_BUILD_MULTI_MODULE)
# Enable multiple modules
# Disable multiple modules by default
set (WAMR_BUILD_MULTI_MODULE 0)
endif ()
@ -94,6 +100,7 @@ endif ()
if (COLLECT_CODE_COVERAGE EQUAL 1)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
endif ()
set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
@ -102,9 +109,11 @@ include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -pie -fPIE")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Wformat -Wformat-security")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wshadow")
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion -Wsign-conversion")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wno-unused")
if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
if (NOT (CMAKE_C_COMPILER MATCHES ".*clang.*" OR CMAKE_C_COMPILER_ID MATCHES ".*Clang"))
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mindirect-branch-register")

View File

@ -7,7 +7,7 @@ WebAssembly Micro Runtime
[BA]: https://bytecodealliance.org/
WebAssembly Micro Runtime (WAMR) is a lightweight standalone WebAssembly (WASM) runtime with a small footprint, high performance and highly configurable features for applications across embedded, IoT, edge, Trusted Execution Environment (TEE), smart contract, cloud native and other scenarios. It includes the following parts:
- The [**"iwasm" VM core**](./README.md#iwasm-vm-core) to run WASM applications, supporting interpreter mode, AOT mode (Ahead-of-Time compilation) and JIT mode (Just-in-Time compilation)
- The [**"iwasm" VM core**](./README.md#iwasm-vm-core) to run WASM applications, supporting interpreter mode, AOT mode (Ahead-of-Time compilation) and JIT modes (Just-in-Time compilation, LLVM JIT and Fast JIT are supported)
- The [**"wamrc" AOT compiler**](./README.md#build-wamrc-aot-compiler) to compile WASM file into AOT file for best performance and smaller runtime footprint, which is run by "iwasm" VM Core

View File

@ -116,6 +116,24 @@ else ()
unset (LLVM_AVAILABLE_LIBS)
endif ()
########################################
## semantic version information
if (NOT DEFINED WAMR_VERSION_MAJOR)
set (WAMR_VERSION_MAJOR 1)
endif ()
if (NOT DEFINED WAMR_VERSION_MINOR)
set (WAMR_VERSION_MINOR 0)
endif ()
if (NOT DEFINED WAMR_VERSION_PATCH)
set (WAMR_VERSION_PATCH 0)
endif ()
configure_file(${WAMR_ROOT_DIR}/core/version.h.in ${WAMR_ROOT_DIR}/core/version.h @ONLY)
########################################
message ("-- Build Configurations:")
message (" Build as target ${WAMR_BUILD_TARGET}")
message (" CMAKE_BUILD_TYPE " ${CMAKE_BUILD_TYPE})
@ -138,6 +156,8 @@ if (WAMR_BUILD_JIT EQUAL 1)
else ()
message (" WAMR LLVM MC JIT enabled")
endif ()
elseif (WAMR_BUILD_FAST_JIT EQUAL 1)
message (" WAMR Fast JIT enabled")
else ()
message (" WAMR JIT disabled")
endif ()
@ -189,6 +209,9 @@ endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)
message (" Lib pthread enabled")
endif ()
if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
message (" Lib pthread semaphore enabled")
endif ()
if (WAMR_BUILD_LIBC_EMCC EQUAL 1)
message (" Libc emcc enabled")
endif ()
@ -257,3 +280,7 @@ if (WAMR_BUILD_LOAD_CUSTOM_SECTION EQUAL 1)
add_definitions (-DWASM_ENABLE_LOAD_CUSTOM_SECTION=1)
message (" Load custom section enabled")
endif ()
if (WAMR_BUILD_STACK_GUARD_SIZE GREATER 0)
add_definitions (-DWASM_STACK_GUARD_SIZE=${WAMR_BUILD_STACK_GUARD_SIZE})
message (" Custom stack guard size: " ${WAMR_BUILD_STACK_GUARD_SIZE})
endif ()

View File

@ -50,7 +50,11 @@ if (NOT DEFINED WAMR_BUILD_TARGET)
endif ()
################ optional according to settings ################
if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1)
if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1
OR WAMR_BUILD_FAST_JIT EQUAL 1)
if (WAMR_BUILD_FAST_JIT EQUAL 1)
set (WAMR_BUILD_FAST_INTERP 0)
endif ()
include (${IWASM_DIR}/interpreter/iwasm_interp.cmake)
endif ()
@ -61,6 +65,10 @@ if (WAMR_BUILD_AOT EQUAL 1)
endif ()
endif ()
if (NOT WAMR_BUILD_JIT EQUAL 1 AND WAMR_BUILD_FAST_JIT EQUAL 1)
include (${IWASM_DIR}/fast-jit/iwasm_fast_jit.cmake)
endif ()
if (WAMR_BUILD_APP_FRAMEWORK EQUAL 1)
include (${APP_FRAMEWORK_DIR}/app_framework.cmake)
include (${SHARED_DIR}/coap/lib_coap.cmake)
@ -78,6 +86,11 @@ elseif (WAMR_BUILD_LIBC_WASI EQUAL 1)
include (${IWASM_DIR}/libraries/libc-wasi/libc_wasi.cmake)
endif ()
if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
# Enable the dependent feature if lib pthread semaphore is enabled
set (WAMR_BUILD_LIB_PTHREAD 1)
endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)
include (${IWASM_DIR}/libraries/lib-pthread/lib_pthread.cmake)
# Enable the dependent feature if lib pthread is enabled
@ -139,6 +152,7 @@ set (source_all
${IWASM_INTERP_SOURCE}
${IWASM_AOT_SOURCE}
${IWASM_COMPL_SOURCE}
${IWASM_FAST_JIT_SOURCE}
${WASM_APP_LIB_SOURCE_ALL}
${NATIVE_INTERFACE_SOURCE}
${APP_MGR_SOURCE}

View File

@ -33,6 +33,10 @@ function (add_module_native arg)
${APP_FRAMEWORK_ROOT_DIR}/${ARGV0}/native/*.h
${APP_FRAMEWORK_ROOT_DIR}/${ARGV0}/native/*.inl
)
LIST (APPEND WASM_APP_LIBS_DIR ${APP_FRAMEWORK_ROOT_DIR}/${ARGV0}/native)
set (WASM_APP_LIBS_DIR ${WASM_APP_LIBS_DIR} PARENT_SCOPE)
LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
set (RUNTIME_LIB_HEADER_LIST ${RUNTIME_LIB_HEADER_LIST} PARENT_SCOPE)

View File

@ -94,6 +94,18 @@
#define WASM_ENABLE_LAZY_JIT 0
#endif
#ifndef WASM_ENABLE_FAST_JIT
#define WASM_ENABLE_FAST_JIT 0
#endif
#ifndef WASM_ENABLE_FAST_JIT_DUMP
#define WASM_ENABLE_FAST_JIT_DUMP 0
#endif
#ifndef FAST_JIT_DEFAULT_CODE_CACHE_SIZE
#define FAST_JIT_DEFAULT_CODE_CACHE_SIZE 10 * 1024 * 1024
#endif
#ifndef WASM_ENABLE_WAMR_COMPILER
#define WASM_ENABLE_WAMR_COMPILER 0
#endif
@ -127,6 +139,10 @@
#define WASM_ENABLE_LIB_PTHREAD 0
#endif
#ifndef WASM_ENABLE_LIB_PTHREAD_SEMAPHORE
#define WASM_ENABLE_LIB_PTHREAD_SEMAPHORE 0
#endif
#ifndef WASM_ENABLE_BASE_LIB
#define WASM_ENABLE_BASE_LIB 0
#endif
@ -327,12 +343,12 @@
/* Reserved bytes to the native thread stack boundary, throw native
stack overflow exception if the guard boundary is reached */
#ifndef RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY
#ifndef WASM_STACK_GUARD_SIZE
#if WASM_ENABLE_UVWASI != 0
/* UVWASI requires larger native stack */
#define RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY (4096 * 6)
#define WASM_STACK_GUARD_SIZE (4096 * 6)
#else
#define RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY (1024)
#define WASM_STACK_GUARD_SIZE (1024)
#endif
#endif
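The hunk above renames RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY to WASM_STACK_GUARD_SIZE while keeping the same defaults (4096 * 6 bytes with UVWASI, 1024 bytes otherwise). As a rough sketch, and assuming the macro remains a plain compile-time default, an embedder could override it before this header is processed, or via the WAMR_BUILD_STACK_GUARD_SIZE CMake option added earlier in this diff (which passes -DWASM_STACK_GUARD_SIZE=...):

/* Hedged sketch: overriding the native-stack guard zone at build time.
 * The value below is an arbitrary example, not a recommendation. */
#ifndef WASM_STACK_GUARD_SIZE
#define WASM_STACK_GUARD_SIZE (4096 * 2) /* reserve 8 KB below the stack boundary */
#endif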

View File

@ -61,8 +61,14 @@ static const aot_intrinsic g_intrinsic_mapping[] = {
{ "f64_promote_f32", "aot_intrinsic_f32_to_f64", AOT_INTRINSIC_FLAG_F32_TO_F64 },
{ "f32_cmp", "aot_intrinsic_f32_cmp", AOT_INTRINSIC_FLAG_F32_CMP },
{ "f64_cmp", "aot_intrinsic_f64_cmp", AOT_INTRINSIC_FLAG_F64_CMP },
{ "f32.const", NULL, AOT_INTRINSIC_FLAG_F32_CONST},
{ "f64.const", NULL, AOT_INTRINSIC_FLAG_F64_CONST},
{ "i32.const", NULL, AOT_INTRINSIC_FLAG_I32_CONST },
{ "i64.const", NULL, AOT_INTRINSIC_FLAG_I64_CONST },
{ "f32.const", NULL, AOT_INTRINSIC_FLAG_F32_CONST },
{ "f64.const", NULL, AOT_INTRINSIC_FLAG_F64_CONST },
{ "i64.div_s", "aot_intrinsic_i64_div_s", AOT_INTRINSIC_FLAG_I64_DIV_S},
{ "i64.div_u", "aot_intrinsic_i64_div_u", AOT_INTRINSIC_FLAG_I64_DIV_U},
{ "i64.rem_s", "aot_intrinsic_i64_rem_s", AOT_INTRINSIC_FLAG_I64_REM_S},
{ "i64.rem_u", "aot_intrinsic_i64_rem_u", AOT_INTRINSIC_FLAG_I64_REM_U},
};
/* clang-format on */
@ -485,6 +491,30 @@ aot_intrinsic_f64_cmp(AOTFloatCond cond, float64 lhs, float64 rhs)
return 0;
}
int64
aot_intrinsic_i64_div_s(int64 l, int64 r)
{
return l / r;
}
uint64
aot_intrinsic_i64_div_u(uint64 l, uint64 r)
{
return l / r;
}
int64
aot_intrinsic_i64_rem_s(int64 l, int64 r)
{
return l % r;
}
uint64
aot_intrinsic_i64_rem_u(uint64 l, uint64 r)
{
return l % r;
}
const char *
aot_intrinsic_get_symbol(const char *llvm_intrinsic)
{
@ -512,6 +542,15 @@ add_intrinsic_capability(AOTCompContext *comp_ctx, uint64 flag)
}
}
static void
add_i64_common_intrinsics(AOTCompContext *comp_ctx)
{
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I64_DIV_S);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I64_DIV_U);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I64_REM_S);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I64_REM_U);
}
static void
add_f32_common_intrinsics(AOTCompContext *comp_ctx)
{
@ -619,6 +658,22 @@ aot_intrinsic_fill_capability_flags(AOTCompContext *comp_ctx)
add_f64_common_intrinsics(comp_ctx);
add_common_float_integer_convertion(comp_ctx);
}
else if (!strncmp(comp_ctx->target_arch, "riscv32", 7)) {
add_i64_common_intrinsics(comp_ctx);
}
else if (!strncmp(comp_ctx->target_arch, "xtensa", 6)) {
/*
* Note: Use builtin intrinsics since hardware float operation
* will cause rodata relocation
*/
add_f32_common_intrinsics(comp_ctx);
add_f64_common_intrinsics(comp_ctx);
add_common_float_integer_convertion(comp_ctx);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_CONST);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_CONST);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I32_CONST);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_I64_CONST);
}
else {
/*
* Use constant value table by default

View File

@ -58,6 +58,7 @@ extern "C" {
#define AOT_INTRINSIC_FLAG_F32_TO_F64 AOT_INTRINSIC_FLAG(0, 24)
#define AOT_INTRINSIC_FLAG_F32_CMP AOT_INTRINSIC_FLAG(0, 25)
#define AOT_INTRINSIC_FLAG_F32_CONST AOT_INTRINSIC_FLAG(0, 26)
#define AOT_INTRINSIC_FLAG_I32_CONST AOT_INTRINSIC_FLAG(0, 27)
#define AOT_INTRINSIC_FLAG_F64_FADD AOT_INTRINSIC_FLAG(1, 0)
#define AOT_INTRINSIC_FLAG_F64_FSUB AOT_INTRINSIC_FLAG(1, 1)
@ -86,6 +87,12 @@ extern "C" {
#define AOT_INTRINSIC_FLAG_F64_TO_F32 AOT_INTRINSIC_FLAG(1, 24)
#define AOT_INTRINSIC_FLAG_F64_CMP AOT_INTRINSIC_FLAG(1, 25)
#define AOT_INTRINSIC_FLAG_F64_CONST AOT_INTRINSIC_FLAG(1, 26)
#define AOT_INTRINSIC_FLAG_I64_CONST AOT_INTRINSIC_FLAG(1, 27)
#define AOT_INTRINSIC_FLAG_I64_DIV_S AOT_INTRINSIC_FLAG(1, 28)
#define AOT_INTRINSIC_FLAG_I64_DIV_U AOT_INTRINSIC_FLAG(1, 29)
#define AOT_INTRINSIC_FLAG_I64_REM_S AOT_INTRINSIC_FLAG(1, 30)
#define AOT_INTRINSIC_FLAG_I64_REM_U AOT_INTRINSIC_FLAG(1, 31)
/* clang-format on */
float32
@ -244,6 +251,18 @@ aot_intrinsic_f32_cmp(AOTFloatCond cond, float32 lhs, float32 rhs);
int32
aot_intrinsic_f64_cmp(AOTFloatCond cond, float64 lhs, float64 rhs);
int64
aot_intrinsic_i64_div_s(int64 l, int64 r);
uint64
aot_intrinsic_i64_div_u(uint64 l, uint64 r);
int64
aot_intrinsic_i64_rem_s(int64 l, int64 r);
uint64
aot_intrinsic_i64_rem_u(uint64 l, uint64 r);
const char *
aot_intrinsic_get_symbol(const char *llvm_intrinsic);
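The four i64 helpers added above are plain C fallbacks that AOT-generated code can call on targets without native 64-bit divide support (e.g. riscv32, per aot_intrinsic_fill_capability_flags earlier in this diff); the WASM-level divide-by-zero and overflow checks are expected to be emitted before the call, as compile_int_div does later in this diff. A small standalone illustration of the semantics, which simply follow C's truncating division (demo_* names are mine, not from the diff):

/* Illustration only: the intrinsics reduce to C's truncating 64-bit
 * division and remainder. */
#include <assert.h>
#include <stdint.h>

static int64_t demo_i64_div_s(int64_t l, int64_t r) { return l / r; }
static int64_t demo_i64_rem_s(int64_t l, int64_t r) { return l % r; }

int main(void)
{
    assert(demo_i64_div_s(-7, 2) == -3); /* truncates toward zero */
    assert(demo_i64_rem_s(-7, 2) == -1); /* remainder keeps the dividend's sign */
    return 0;
}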

View File

@ -482,7 +482,7 @@ load_native_symbol_section(const uint8 *buf, const uint8 *buf_end,
for (i = cnt - 1; i >= 0; i--) {
read_string(p, p_end, symbol);
if (!strncmp(symbol, "f32#", 4)) {
if (!strncmp(symbol, "f32#", 4) || !strncmp(symbol, "i32#", 4)) {
uint32 u32;
/* Resolve the raw int bits of f32 const */
if (!str2uint32(symbol + 4, &u32)) {
@ -492,7 +492,8 @@ load_native_symbol_section(const uint8 *buf, const uint8 *buf_end,
}
*(uint32 *)(&module->native_symbol_list[i]) = u32;
}
else if (!strncmp(symbol, "f64#", 4)) {
else if (!strncmp(symbol, "f64#", 4)
|| !strncmp(symbol, "i64#", 4)) {
uint64 u64;
/* Resolve the raw int bits of f64 const */
if (!str2uint64(symbol + 4, &u64)) {
@ -3135,6 +3136,8 @@ aot_convert_wasm_module(WASMModule *wasm_module, char *error_buf,
}
option.is_jit_mode = true;
option.opt_level = 3;
option.size_level = 3;
#if WASM_ENABLE_BULK_MEMORY != 0
option.enable_bulk_memory = true;
#endif

View File

@ -106,6 +106,10 @@ typedef struct {
REG_SYM(aot_intrinsic_f32_to_f64), \
REG_SYM(aot_intrinsic_f32_cmp), \
REG_SYM(aot_intrinsic_f64_cmp), \
REG_SYM(aot_intrinsic_i64_div_s), \
REG_SYM(aot_intrinsic_i64_div_u), \
REG_SYM(aot_intrinsic_i64_rem_s), \
REG_SYM(aot_intrinsic_i64_rem_u), \
#define REG_COMMON_SYMBOLS \
REG_SYM(aot_set_exception_with_id), \

View File

@ -2205,28 +2205,6 @@ aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
}
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
bool
aot_is_wasm_type_equal(AOTModuleInstance *module_inst, uint32 type1_idx,
uint32 type2_idx)
{
WASMType *type1, *type2;
AOTModule *module = (AOTModule *)module_inst->aot_module.ptr;
if (type1_idx >= module->func_type_count
|| type2_idx >= module->func_type_count) {
aot_set_exception(module_inst, "type index out of bounds");
return false;
}
if (type1_idx == type2_idx)
return true;
type1 = module->func_types[type1_idx];
type2 = module->func_types[type2_idx];
return wasm_type_equal(type1, type2);
}
bool
aot_invoke_native(WASMExecEnv *exec_env, uint32 func_idx, uint32 argc,
uint32 *argv)

View File

@ -373,19 +373,15 @@ typedef struct AOTModuleInstance {
AOTPointer wasi_ctx;
/* function performance profiling info list */
AOTPointer func_perf_profilings;
/* stack frames, used in call stack dump and perf profiling */
AOTPointer frames;
AOTPointer exec_env_singleton;
/* others */
uint32 temp_ret;
uint32 llvm_stack;
uint32 default_wasm_stack_size;
uint32 _padding;
/* store stacktrace information */
AOTPointer frames;
/* reserved */
uint32 reserved[6];
uint32 reserved[9];
/*
* +------------------------------+ <-- memories.ptr
@ -628,19 +624,6 @@ aot_get_native_addr_range(AOTModuleInstance *module_inst, uint8 *native_ptr,
bool
aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count);
/**
* Compare whether two wasm types are equal according to the indexs
*
* @param module_inst the AOT module instance
* @param type1_idx index of the first wasm type
* @param type2_idx index of the second wasm type
*
* @return true if equal, false otherwise
*/
bool
aot_is_wasm_type_equal(AOTModuleInstance *module_inst, uint32 type1_idx,
uint32 type2_idx);
/**
* Invoke native function from aot code
*/

View File

@ -51,6 +51,8 @@ void __mulsf3();
void __subdf3();
void __subsf3();
void __truncdfsf2();
void __floatunsisf();
void __fixunsdfsi();
#else
void __ac_push_13_to_13();
void __ac_push_13_to_14();
@ -158,6 +160,8 @@ static SymbolMap target_sym_map[] = {
REG_SYM(__subdf3),
REG_SYM(__subsf3),
REG_SYM(__truncdfsf2),
REG_SYM(__floatunsisf),
REG_SYM(__fixunsdfsi),
#else
REG_SYM(__ac_push_13_to_13),
REG_SYM(__ac_push_13_to_14),

View File

@ -3955,9 +3955,8 @@ interp_link(const wasm_instance_t *inst, const WASMModule *module_interp,
}
case IMPORT_KIND_MEMORY:
case IMPORT_KIND_TABLE:
ASSERT_NOT_IMPLEMENTED();
break;
default:
ASSERT_NOT_IMPLEMENTED();
LOG_WARNING("%s meets unsupported kind: %d", __FUNCTION__,
import_rt->kind);
goto failed;
@ -4175,9 +4174,8 @@ aot_link(const wasm_instance_t *inst, const AOTModule *module_aot,
break;
case WASM_EXTERN_MEMORY:
case WASM_EXTERN_TABLE:
ASSERT_NOT_IMPLEMENTED();
break;
default:
ASSERT_NOT_IMPLEMENTED();
goto failed;
}
}

View File

@ -73,6 +73,7 @@ wasm_exec_env_create_internal(struct WASMModuleInstanceCommon *module_inst,
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_exec_env_mem_consumption(exec_env);
#endif
return exec_env;
#if WASM_ENABLE_THREAD_MGR != 0
@ -188,9 +189,10 @@ wasm_exec_env_set_module_inst(WASMExecEnv *exec_env,
void
wasm_exec_env_set_thread_info(WASMExecEnv *exec_env)
{
uint8 *stack_boundary = os_thread_get_stack_boundary();
exec_env->handle = os_self_thread();
exec_env->native_stack_boundary = os_thread_get_stack_boundary()
+ RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY;
exec_env->native_stack_boundary =
stack_boundary ? stack_boundary + WASM_STACK_GUARD_SIZE : NULL;
}
#if WASM_ENABLE_THREAD_MGR != 0
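With this change the boundary stays NULL when os_thread_get_stack_boundary() cannot determine the stack base, instead of adding the guard size to a null pointer. A hypothetical sketch (demo_native_stack_ok is mine, not code from this diff) of how such a boundary is typically consumed:

/* Hypothetical sketch: compare an address near the current stack top
 * against native_stack_boundary; a NULL boundary means "unknown", so
 * the check is skipped rather than comparing against NULL + guard size. */
#include "wasm_exec_env.h"

static bool
demo_native_stack_ok(WASMExecEnv *exec_env)
{
    uint8 stack_probe = 0; /* its address approximates the current stack top */
    if (exec_env->native_stack_boundary
        && &stack_probe < exec_env->native_stack_boundary)
        return false; /* inside the guard zone: native stack overflow */
    return true;
}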

View File

@ -84,6 +84,17 @@ typedef struct WASMExecEnv {
void **native_symbol;
#endif
#if WASM_ENABLE_FAST_JIT != 0
/**
* Cache for
* - jit native operations on 32-bit targets which lack 64-bit
* int/float registers, mainly for operations on double and int64,
* such as F64TOI64, F32TOI64, I64 MUL/REM, and so on.
* - SSE instructions.
**/
uint64 jit_cache[2];
#endif
#if WASM_ENABLE_THREAD_MGR != 0
/* thread return value */
void *thread_ret_value;

View File

@ -27,7 +27,11 @@
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
#include "../common/wasm_c_api_internal.h"
#include "../../version.h"
/**
* For runtime build, BH_MALLOC/BH_FREE should be defined as
@ -117,6 +121,10 @@ runtime_malloc(uint64 size, WASMModuleInstanceCommon *module_inst,
return mem;
}
#if WASM_ENABLE_FAST_JIT != 0
static JitCompOptions jit_options = { 0 };
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
/* The exec_env of thread local storage, set before calling function
and used in signal handler, as we cannot get it from the argument
@ -259,8 +267,20 @@ wasm_runtime_env_init()
}
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (!jit_compiler_init(&jit_options)) {
goto fail9;
}
#endif
return true;
#if WASM_ENABLE_FAST_JIT != 0
fail9:
#if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy();
#endif
#endif
#if WASM_ENABLE_REF_TYPES != 0
fail8:
#endif
@ -321,6 +341,10 @@ wasm_runtime_init()
void
wasm_runtime_destroy()
{
#if WASM_ENABLE_FAST_JIT != 0
jit_compiler_destroy();
#endif
#if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy();
#endif
@ -368,6 +392,10 @@ wasm_runtime_full_init(RuntimeInitArgs *init_args)
&init_args->mem_alloc_option))
return false;
#if WASM_ENABLE_FAST_JIT != 0
jit_options.code_cache_size = init_args->fast_jit_code_cache_size;
#endif
if (!wasm_runtime_env_init()) {
wasm_runtime_memory_destroy();
return false;
@ -478,9 +506,17 @@ wasm_runtime_is_xip_file(const uint8 *buf, uint32 size)
uint32
wasm_runtime_start_debug_instance(WASMExecEnv *exec_env)
{
WASMModuleInstanceCommon *module_inst =
wasm_runtime_get_module_inst(exec_env);
WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
bh_assert(module_inst);
bh_assert(cluster);
if (module_inst->module_type != Wasm_Module_Bytecode) {
LOG_WARNING("Attempt to create a debug instance for an AOT module");
return 0;
}
if (cluster->debug_inst) {
LOG_WARNING("Cluster already bind to a debug instance");
return cluster->debug_inst->control_thread->port;
@ -1942,28 +1978,6 @@ fail1:
return ret;
}
bool
wasm_runtime_create_exec_env_and_call_wasm(
WASMModuleInstanceCommon *module_inst, WASMFunctionInstanceCommon *function,
uint32 argc, uint32 argv[])
{
bool ret = false;
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode)
ret = wasm_create_exec_env_and_call_function(
(WASMModuleInstance *)module_inst, (WASMFunctionInstance *)function,
argc, argv, true);
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
ret = aot_create_exec_env_and_call_function(
(AOTModuleInstance *)module_inst, (AOTFunctionInstance *)function,
argc, argv);
#endif
return ret;
}
bool
wasm_runtime_create_exec_env_singleton(WASMModuleInstanceCommon *module_inst)
{
@ -2287,70 +2301,6 @@ wasm_runtime_get_native_addr_range(WASMModuleInstanceCommon *module_inst,
return false;
}
uint32
wasm_runtime_get_temp_ret(WASMModuleInstanceCommon *module_inst)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode)
return ((WASMModuleInstance *)module_inst)->temp_ret;
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
return ((AOTModuleInstance *)module_inst)->temp_ret;
#endif
return 0;
}
void
wasm_runtime_set_temp_ret(WASMModuleInstanceCommon *module_inst,
uint32 temp_ret)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
((WASMModuleInstance *)module_inst)->temp_ret = temp_ret;
return;
}
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT) {
((AOTModuleInstance *)module_inst)->temp_ret = temp_ret;
return;
}
#endif
}
uint32
wasm_runtime_get_llvm_stack(WASMModuleInstanceCommon *module_inst)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode)
return ((WASMModuleInstance *)module_inst)->llvm_stack;
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
return ((AOTModuleInstance *)module_inst)->llvm_stack;
#endif
return 0;
}
void
wasm_runtime_set_llvm_stack(WASMModuleInstanceCommon *module_inst,
uint32 llvm_stack)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
((WASMModuleInstance *)module_inst)->llvm_stack = llvm_stack;
return;
}
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT) {
((AOTModuleInstance *)module_inst)->llvm_stack = llvm_stack;
return;
}
#endif
}
bool
wasm_runtime_enlarge_memory(WASMModuleInstanceCommon *module,
uint32 inc_page_count)
@ -5092,3 +5042,11 @@ wasm_runtime_destroy_custom_sections(WASMCustomSection *section_list)
}
}
#endif /* end of WASM_ENABLE_LOAD_CUSTOM_SECTION */
void
wasm_runtime_get_version(uint32_t *major, uint32_t *minor, uint32_t *patch)
{
*major = WAMR_VERSION_MAJOR;
*minor = WAMR_VERSION_MINOR;
*patch = WAMR_VERSION_PATCH;
}
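The new wasm_runtime_get_version() simply reports the WAMR_VERSION_* macros generated from core/version.h.in, and the earlier hunk wires init_args->fast_jit_code_cache_size into the Fast JIT options. A hedged usage sketch, assuming both are exposed through the usual wasm_export.h API (Alloc_With_System_Allocator and wasm_runtime_destroy are not part of this diff):

/* Hedged usage sketch: set the Fast JIT code cache size at init time
 * and query the runtime version. */
#include "wasm_export.h"
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    RuntimeInitArgs init_args;
    uint32_t major, minor, patch;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_System_Allocator;
    /* Only consulted when WASM_ENABLE_FAST_JIT != 0; leaving it 0 is
       assumed to fall back to FAST_JIT_DEFAULT_CODE_CACHE_SIZE (10 MB). */
    init_args.fast_jit_code_cache_size = 4 * 1024 * 1024;

    if (!wasm_runtime_full_init(&init_args))
        return 1;

    wasm_runtime_get_version(&major, &minor, &patch);
    printf("WAMR %" PRIu32 ".%" PRIu32 ".%" PRIu32 "\n", major, minor, patch);

    wasm_runtime_destroy();
    return 0;
}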

View File

@ -592,7 +592,8 @@ wasm_runtime_call_indirect(WASMExecEnv *exec_env, uint32 element_indices,
bool
wasm_runtime_create_exec_env_singleton(WASMModuleInstanceCommon *module_inst);
WASMExecEnv *
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMExecEnv *
wasm_runtime_get_exec_env_singleton(WASMModuleInstanceCommon *module_inst);
/* See wasm_export.h for description */
@ -689,20 +690,6 @@ WASM_RUNTIME_API_EXTERN const uint8 *
wasm_runtime_get_custom_section(WASMModuleCommon *const module_comm,
const char *name, uint32 *len);
uint32
wasm_runtime_get_temp_ret(WASMModuleInstanceCommon *module_inst);
void
wasm_runtime_set_temp_ret(WASMModuleInstanceCommon *module_inst,
uint32 temp_ret);
uint32
wasm_runtime_get_llvm_stack(WASMModuleInstanceCommon *module_inst);
void
wasm_runtime_set_llvm_stack(WASMModuleInstanceCommon *module_inst,
uint32 llvm_stack);
#if WASM_ENABLE_MULTI_MODULE != 0
WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_module_reader(const module_reader reader,

View File

@ -2643,9 +2643,8 @@ apply_func_passes(AOTCompContext *comp_ctx)
LLVMAddLoopRotatePass(pass_mgr);
#if LLVM_VERSION_MAJOR < 15
LLVMAddLoopUnswitchPass(pass_mgr);
/* Binding disabled in LLVM 15, don't add the pass util we can either
add a binding to SimpleLoopUnswitchPass, or add it to
aot_llvm_extra.cpp */
#else
aot_add_simple_loop_unswitch_pass(pass_mgr);
#endif
LLVMAddInstructionCombiningPass(pass_mgr);
LLVMAddCFGSimplificationPass(pass_mgr);
@ -2743,8 +2742,9 @@ aot_require_lower_switch_pass(AOTCompContext *comp_ctx)
{
bool ret = false;
/* IR switch/case will cause .rodata relocation on riscv */
if (!strncmp(comp_ctx->target_arch, "riscv", 5)) {
/* IR switch/case will cause .rodata relocation on riscv/xtensa */
if (!strncmp(comp_ctx->target_arch, "riscv", 5)
|| !strncmp(comp_ctx->target_arch, "xtensa", 6)) {
ret = true;
}

View File

@ -10,8 +10,23 @@ bool
aot_compile_op_i32_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
int32 i32_const)
{
LLVMValueRef value = I32_CONST((uint32)i32_const);
CHECK_LLVM_CONST(value);
LLVMValueRef value;
if (comp_ctx->is_indirect_mode
&& aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
WASMValue wasm_value;
wasm_value.i32 = i32_const;
value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
&wasm_value, VALUE_TYPE_I32);
if (!value) {
return false;
}
}
else {
value = I32_CONST((uint32)i32_const);
CHECK_LLVM_CONST(value);
}
PUSH_I32(value);
return true;
fail:
@ -22,8 +37,23 @@ bool
aot_compile_op_i64_const(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
int64 i64_const)
{
LLVMValueRef value = I64_CONST((uint64)i64_const);
CHECK_LLVM_CONST(value);
LLVMValueRef value;
if (comp_ctx->is_indirect_mode
&& aot_intrinsic_check_capability(comp_ctx, "i64.const")) {
WASMValue wasm_value;
wasm_value.i64 = i64_const;
value = aot_load_const_from_table(comp_ctx, func_ctx->native_symbol,
&wasm_value, VALUE_TYPE_I64);
if (!value) {
return false;
}
}
else {
value = I64_CONST((uint64)i64_const);
CHECK_LLVM_CONST(value);
}
PUSH_I64(value);
return true;
fail:

View File

@ -97,7 +97,19 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
CHECK_LLVM_CONST(offset_const);
if (comp_ctx->is_indirect_mode
&& aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
WASMValue wasm_value;
wasm_value.i32 = offset;
offset_const = aot_load_const_from_table(
comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
if (!offset_const) {
return NULL;
}
}
else {
CHECK_LLVM_CONST(offset_const);
}
/* Get memory base address and memory data size */
if (func_ctx->mem_space_unchanged

View File

@ -388,6 +388,9 @@ compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
{
LLVMValueRef left, right, cmp_div_zero, overflow, res;
LLVMBasicBlockRef check_div_zero_succ, check_overflow_succ;
LLVMTypeRef param_types[2];
param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
|| arith_op == INT_REM_S || arith_op == INT_REM_U);
@ -459,16 +462,56 @@ compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Build div */
switch (arith_op) {
case INT_DIV_S:
LLVM_BUILD_OP(SDiv, left, right, res, "div_s", false);
if (comp_ctx->disable_llvm_intrinsics && !is_i32
&& aot_intrinsic_check_capability(comp_ctx,
"i64.div_s")) {
res = aot_call_llvm_intrinsic(
comp_ctx, func_ctx, "i64.div_s", param_types[0],
param_types, 2, left, right);
}
else {
LLVM_BUILD_OP(SDiv, left, right, res, "div_s",
false);
}
break;
case INT_DIV_U:
LLVM_BUILD_OP(UDiv, left, right, res, "div_u", false);
if (comp_ctx->disable_llvm_intrinsics && !is_i32
&& aot_intrinsic_check_capability(comp_ctx,
"i64.div_u")) {
res = aot_call_llvm_intrinsic(
comp_ctx, func_ctx, "i64.div_u", param_types[0],
param_types, 2, left, right);
}
else {
LLVM_BUILD_OP(UDiv, left, right, res, "div_u",
false);
}
break;
case INT_REM_S:
LLVM_BUILD_OP(SRem, left, right, res, "rem_s", false);
if (comp_ctx->disable_llvm_intrinsics && !is_i32
&& aot_intrinsic_check_capability(comp_ctx,
"i64.rem_s")) {
res = aot_call_llvm_intrinsic(
comp_ctx, func_ctx, "i64.rem_s", param_types[0],
param_types, 2, left, right);
}
else {
LLVM_BUILD_OP(SRem, left, right, res, "rem_s",
false);
}
break;
case INT_REM_U:
LLVM_BUILD_OP(URem, left, right, res, "rem_u", false);
if (comp_ctx->disable_llvm_intrinsics && !is_i32
&& aot_intrinsic_check_capability(comp_ctx,
"i64.rem_u")) {
res = aot_call_llvm_intrinsic(
comp_ctx, func_ctx, "i64.rem_u", param_types[0],
param_types, 2, left, right);
}
else {
LLVM_BUILD_OP(URem, left, right, res, "rem_u",
false);
}
break;
default:
bh_assert(0);
@ -800,9 +843,10 @@ is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
* so user must specify '--cpu-features=+soft-float' to wamrc if the target
* doesn't have or enable FPU on arm, x86 or mips. */
if (is_target_arm(comp_ctx) || is_target_x86(comp_ctx)
|| is_target_mips(comp_ctx))
|| is_target_mips(comp_ctx)) {
ret = strstr(feature_string, "+soft-float") ? true : false;
else if (is_target_xtensa(comp_ctx))
}
else if (is_target_xtensa(comp_ctx)) {
/* Note:
* 1. The Floating-Point Coprocessor Option of xtensa only support
* single-precision floating-point operations, so must use soft-float
@ -811,7 +855,11 @@ is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
* so user must specify '--cpu-features=-fp' to wamrc if the target
* doesn't have or enable Floating-Point Coprocessor Option on xtensa.
*/
ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
if (comp_ctx->disable_llvm_intrinsics)
ret = false;
else
ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
}
else if (is_target_riscv(comp_ctx)) {
/*
* Note: Use builtin intrinsics since hardware float operation
@ -823,8 +871,9 @@ is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
else
ret = !strstr(feature_string, "+d") ? true : false;
}
else
else {
ret = true;
}
LLVMDisposeMessage(feature_string);
return ret;

View File

@ -2731,6 +2731,18 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
int32 index;
switch (value_type) {
case VALUE_TYPE_I32:
/* Store the raw int bits of i32 const as a hex string */
snprintf(buf, sizeof(buf), "i32#%08" PRIX32, value->i32);
const_ptr_type = INT32_PTR_TYPE;
const_type = I32_TYPE;
break;
case VALUE_TYPE_I64:
/* Store the raw int bits of i64 const as a hex string */
snprintf(buf, sizeof(buf), "i64#%016" PRIX64, value->i64);
const_ptr_type = INT64_PTR_TYPE;
const_type = I64_TYPE;
break;
case VALUE_TYPE_F32:
/* Store the raw int bits of f32 const as a hex string */
snprintf(buf, sizeof(buf), "f32#%08" PRIX32, value->i32);

View File

@ -499,6 +499,9 @@ aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);
void
aot_add_expand_memory_op_pass(LLVMPassManagerRef pass);
void
aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass);
void
aot_apply_llvm_new_pass_manager(AOTCompContext *comp_ctx);

View File

@ -66,6 +66,9 @@ aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);
void
aot_add_expand_memory_op_pass(LLVMPassManagerRef pass);
void
aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass);
void
aot_func_disable_tce(LLVMValueRef func);
@ -258,6 +261,12 @@ aot_add_expand_memory_op_pass(LLVMPassManagerRef pass)
unwrap(pass)->add(new ExpandMemoryOpPass());
}
void
aot_add_simple_loop_unswitch_pass(LLVMPassManagerRef pass)
{
unwrap(pass)->add(createSimpleLoopUnswitchLegacyPass());
}
bool
aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str)
{

View File

@ -0,0 +1,42 @@
diff --git a/src/asmjit/core/cpuinfo.cpp b/src/asmjit/core/cpuinfo.cpp
index 7bf7407..ae2160b 100644
--- a/src/asmjit/core/cpuinfo.cpp
+++ b/src/asmjit/core/cpuinfo.cpp
@@ -9,13 +9,13 @@
#if !defined(_WIN32)
#include <errno.h>
- #include <sys/utsname.h>
+ //#include <sys/utsname.h>
#include <unistd.h>
#endif
// Required by `getauxval()` on Linux.
#if defined(__linux__)
- #include <sys/auxv.h>
+ //#include <sys/auxv.h>
#endif
//! Required to detect CPU and features on Apple platforms.
diff --git a/src/asmjit/core/globals.cpp b/src/asmjit/core/globals.cpp
index 2bbd0c0..e6b69e5 100644
--- a/src/asmjit/core/globals.cpp
+++ b/src/asmjit/core/globals.cpp
@@ -105,6 +105,8 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#endif
}
+extern "C" int os_printf(const char *message, ...);
+
// DebugUtils - Debug Output
// =========================
@@ -112,7 +114,7 @@ ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
- ::fputs(str, stderr);
+ os_printf(str);
#endif
}

View File

@ -0,0 +1,17 @@
Copyright (c) 2008-2020 The AsmJit Authors
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

View File

@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2014-2021 Florian Bernd
Copyright (c) 2014-2021 Joel Höner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

File diff suppressed because it is too large

View File

@ -0,0 +1,345 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_compare.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
static bool
jit_compile_op_compare_integer(JitCompContext *cc, IntCond cond, bool is64Bit)
{
JitReg lhs, rhs, res, const_zero, const_one;
if (cond < INT_EQZ || cond > INT_GE_U) {
jit_set_last_error(cc, "unsupported comparison operation");
goto fail;
}
res = jit_cc_new_reg_I32(cc);
const_zero = NEW_CONST(I32, 0);
const_one = NEW_CONST(I32, 1);
if (is64Bit) {
if (INT_EQZ == cond) {
rhs = NEW_CONST(I64, 0);
}
else {
POP_I64(rhs);
}
POP_I64(lhs);
}
else {
if (INT_EQZ == cond) {
rhs = NEW_CONST(I32, 0);
}
else {
POP_I32(rhs);
}
POP_I32(lhs);
}
GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
switch (cond) {
case INT_EQ:
case INT_EQZ:
{
GEN_INSN(SELECTEQ, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_NE:
{
GEN_INSN(SELECTNE, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_LT_S:
{
GEN_INSN(SELECTLTS, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_LT_U:
{
GEN_INSN(SELECTLTU, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_GT_S:
{
GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_GT_U:
{
GEN_INSN(SELECTGTU, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_LE_S:
{
GEN_INSN(SELECTLES, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_LE_U:
{
GEN_INSN(SELECTLEU, res, cc->cmp_reg, const_one, const_zero);
break;
}
case INT_GE_S:
{
GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
break;
}
default: /* INT_GE_U */
{
GEN_INSN(SELECTGEU, res, cc->cmp_reg, const_one, const_zero);
break;
}
}
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond)
{
return jit_compile_op_compare_integer(cc, cond, false);
}
bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond)
{
return jit_compile_op_compare_integer(cc, cond, true);
}
static int32
float_cmp_eq(float f1, float f2)
{
if (isnan(f1) || isnan(f2))
return 0;
return f1 == f2;
}
static int32
float_cmp_ne(float f1, float f2)
{
if (isnan(f1) || isnan(f2))
return 1;
return f1 != f2;
}
static int32
double_cmp_eq(double d1, double d2)
{
if (isnan(d1) || isnan(d2))
return 0;
return d1 == d2;
}
static int32
double_cmp_ne(double d1, double d2)
{
if (isnan(d1) || isnan(d2))
return 1;
return d1 != d2;
}
static bool
jit_compile_op_compare_float_point(JitCompContext *cc, FloatCond cond,
JitReg lhs, JitReg rhs)
{
JitReg res, args[2], const_zero, const_one;
JitRegKind kind;
void *func;
if (cond == FLOAT_EQ || cond == FLOAT_NE) {
kind = jit_reg_kind(lhs);
if (cond == FLOAT_EQ)
func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_eq
: (void *)double_cmp_eq;
else
func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_ne
: (void *)double_cmp_ne;
res = jit_cc_new_reg_I32(cc);
args[0] = lhs;
args[1] = rhs;
if (!jit_emit_callnative(cc, func, res, args, 2)) {
goto fail;
}
}
else {
res = jit_cc_new_reg_I32(cc);
const_zero = NEW_CONST(I32, 0);
const_one = NEW_CONST(I32, 1);
switch (cond) {
case FLOAT_LT:
{
GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
break;
}
case FLOAT_GT:
{
GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
break;
}
case FLOAT_LE:
{
GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
break;
}
case FLOAT_GE:
{
GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
break;
}
default:
{
bh_assert(!"unknown FloatCond");
goto fail;
}
}
}
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond)
{
JitReg res, const_zero, const_one;
JitReg lhs, rhs;
POP_F32(rhs);
POP_F32(lhs);
if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
float32 lvalue = jit_cc_get_const_F32(cc, lhs);
float32 rvalue = jit_cc_get_const_F32(cc, rhs);
const_zero = NEW_CONST(I32, 0);
const_one = NEW_CONST(I32, 1);
switch (cond) {
case FLOAT_EQ:
{
res = (lvalue == rvalue) ? const_one : const_zero;
break;
}
case FLOAT_NE:
{
res = (lvalue != rvalue) ? const_one : const_zero;
break;
}
case FLOAT_LT:
{
res = (lvalue < rvalue) ? const_one : const_zero;
break;
}
case FLOAT_GT:
{
res = (lvalue > rvalue) ? const_one : const_zero;
break;
}
case FLOAT_LE:
{
res = (lvalue <= rvalue) ? const_one : const_zero;
break;
}
case FLOAT_GE:
{
res = (lvalue >= rvalue) ? const_one : const_zero;
break;
}
default:
{
bh_assert(!"unknown FloatCond");
goto fail;
}
}
PUSH_I32(res);
return true;
}
return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
fail:
return false;
}
bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond)
{
JitReg res, const_zero, const_one;
JitReg lhs, rhs;
POP_F64(rhs);
POP_F64(lhs);
if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
float64 lvalue = jit_cc_get_const_F64(cc, lhs);
float64 rvalue = jit_cc_get_const_F64(cc, rhs);
const_zero = NEW_CONST(I32, 0);
const_one = NEW_CONST(I32, 1);
switch (cond) {
case FLOAT_EQ:
{
res = (lvalue == rvalue) ? const_one : const_zero;
break;
}
case FLOAT_NE:
{
res = (lvalue != rvalue) ? const_one : const_zero;
break;
}
case FLOAT_LT:
{
res = (lvalue < rvalue) ? const_one : const_zero;
break;
}
case FLOAT_GT:
{
res = (lvalue > rvalue) ? const_one : const_zero;
break;
}
case FLOAT_LE:
{
res = (lvalue <= rvalue) ? const_one : const_zero;
break;
}
case FLOAT_GE:
{
res = (lvalue >= rvalue) ? const_one : const_zero;
break;
}
default:
{
bh_assert(!"unknown FloatCond");
goto fail;
}
}
PUSH_I32(res);
return true;
}
return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
fail:
return false;
}
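float_cmp_eq/ne and double_cmp_eq/ne above exist presumably because eq/ne need WASM's NaN semantics (NaN compares unequal to everything, including itself), so those two conditions are routed through a native helper call while the ordered comparisons use CMP/SELECT instructions. A small standalone check of that truth table (demo_* names are mine; illustration only):

/* Illustration only: the NaN behaviour of WASM f32.eq / f32.ne as
 * implemented by the helpers above. */
#include <assert.h>
#include <math.h>

static int demo_float_cmp_eq(float f1, float f2)
{
    if (isnan(f1) || isnan(f2))
        return 0;
    return f1 == f2;
}

static int demo_float_cmp_ne(float f1, float f2)
{
    if (isnan(f1) || isnan(f2))
        return 1;
    return f1 != f2;
}

int main(void)
{
    assert(demo_float_cmp_eq(NAN, NAN) == 0);  /* NaN is not equal to itself */
    assert(demo_float_cmp_ne(NAN, 1.0f) == 1); /* any NaN operand makes ne true */
    assert(demo_float_cmp_eq(1.0f, 1.0f) == 1);
    return 0;
}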

View File

@ -0,0 +1,32 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_COMPARE_H_
#define _JIT_EMIT_COMPARE_H_
#include "../jit_compiler.h"
#include "../jit_frontend.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond);
bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond);
bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond);
bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_COMPARE_H_ */

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_const.h"
#include "../jit_frontend.h"
bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const)
{
JitReg value = NEW_CONST(I32, i32_const);
PUSH_I32(value);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const)
{
JitReg value = NEW_CONST(I64, i64_const);
PUSH_I64(value);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const)
{
JitReg value = NEW_CONST(F32, f32_const);
PUSH_F32(value);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const)
{
JitReg value = NEW_CONST(F64, f64_const);
PUSH_F64(value);
return true;
fail:
return false;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONST_H_
#define _JIT_EMIT_CONST_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const);
bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const);
bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const);
bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_CONST_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,56 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONTROL_H_
#define _JIT_EMIT_CONTROL_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip,
uint8 *frame_ip_end, uint32 label_type, uint32 param_count,
uint8 *param_types, uint32 result_count,
uint8 *result_types, bool merge_cmp_and_if);
bool
jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip);
bool
jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip);
bool
jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip);
bool
jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
bool merge_cmp_and_br_if, uint8 **p_frame_ip);
bool
jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
uint8 **p_frame_ip);
bool
jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip);
bool
jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip);
bool
jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip);
#if WASM_ENABLE_THREAD_MGR != 0
bool
jit_check_suspend_flags(JitCompContext *cc);
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_CONTROL_H_ */

View File

@ -0,0 +1,660 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_conversion.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_codegen.h"
#include "../jit_frontend.h"
#define F32_I32_S_MIN (-2147483904.0f)
#define F32_I32_S_MAX (2147483648.0f)
#define F32_I32_U_MIN (-1.0f)
#define F32_I32_U_MAX (4294967296.0f)
#define F32_I64_S_MIN (-9223373136366403584.0f)
#define F32_I64_S_MAX (9223372036854775808.0f)
#define F32_I64_U_MIN (-1.0f)
#define F32_I64_U_MAX (18446744073709551616.0f)
#define F64_I32_S_MIN (-2147483649.0)
#define F64_I32_S_MAX (2147483648.0)
#define F64_I32_U_MIN (-1.0)
#define F64_I32_U_MAX (4294967296.0)
#define F64_I64_S_MIN (-9223372036854777856.0)
#define F64_I64_S_MAX (9223372036854775808.0)
#define F64_I64_U_MIN (-1.0)
#define F64_I64_U_MAX (18446744073709551616.0)
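/*
 * Note: each MIN/MAX pair above brackets the set of f32/f64 values whose
 * truncation fits the target integer type. The non-saturating conversions
 * trap unless MIN < value < MAX, while the saturating helpers below clamp
 * to the integer type's min/max at these same bounds.
 */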
#define FP_TO_INT(f_ty, i_ty, f_nm, i_nm) \
static i_ty i_nm##_trunc_##f_nm(f_ty fp)
#define INT_TO_FP(i_ty, f_ty, i_nm, f_nm) \
static f_ty f_nm##_convert_##i_nm(i_ty i)
#define FP_TO_INT_SAT(f_ty, i_ty, f_nm, i_nm) \
static i_ty i_nm##_trunc_##f_nm##_sat(f_ty fp)
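/*
 * For example, FP_TO_INT_SAT(float, int32, f32, i32) { ... } expands to
 * static int32 i32_trunc_f32_sat(float fp) { ... }, matching the helper
 * names passed to jit_emit_callnative below.
 */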
static int
local_isnan(double x)
{
return isnan(x);
}
static int
local_isnanf(float x)
{
return isnan(x);
}
#define RETURN_IF_NANF(fp) \
if (local_isnanf(fp)) { \
return 0; \
}
#define RETURN_IF_NAN(fp) \
if (local_isnan(fp)) { \
return 0; \
}
#define RETURN_IF_INF(fp, i_min, i_max) \
if (isinf(fp)) { \
return fp < 0 ? i_min : i_max; \
}
#define RETURN_IF_MIN(fp, f_min, i_min) \
if (fp <= f_min) { \
return i_min; \
}
#define RETURN_IF_MAX(fp, f_max, i_max) \
if (fp >= f_max) { \
return i_max; \
}
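/*
 * The *_sat helpers implement WebAssembly's non-trapping float-to-int
 * conversions: NaN yields 0, and values outside the target range
 * (including infinities) are clamped to the integer type's min/max.
 */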
FP_TO_INT_SAT(float, int32, f32, i32)
{
RETURN_IF_NANF(fp)
RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
RETURN_IF_MIN(fp, F32_I32_S_MIN, INT32_MIN)
RETURN_IF_MAX(fp, F32_I32_S_MAX, INT32_MAX)
return (int32)fp;
}
FP_TO_INT_SAT(float, uint32, f32, u32)
{
RETURN_IF_NANF(fp)
RETURN_IF_INF(fp, 0, UINT32_MAX)
RETURN_IF_MIN(fp, F32_I32_U_MIN, 0)
RETURN_IF_MAX(fp, F32_I32_U_MAX, UINT32_MAX)
return (uint32)fp;
}
FP_TO_INT_SAT(double, int32, f64, i32)
{
RETURN_IF_NAN(fp)
RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
RETURN_IF_MIN(fp, F64_I32_S_MIN, INT32_MIN)
RETURN_IF_MAX(fp, F64_I32_S_MAX, INT32_MAX)
return (int32)fp;
}
FP_TO_INT_SAT(double, uint32, f64, u32)
{
RETURN_IF_NAN(fp)
RETURN_IF_INF(fp, 0, UINT32_MAX)
RETURN_IF_MIN(fp, F64_I32_U_MIN, 0)
RETURN_IF_MAX(fp, F64_I32_U_MAX, UINT32_MAX)
return (uint32)fp;
}
FP_TO_INT_SAT(float, int64, f32, i64)
{
RETURN_IF_NANF(fp)
RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
RETURN_IF_MIN(fp, F32_I64_S_MIN, INT64_MIN)
RETURN_IF_MAX(fp, F32_I64_S_MAX, INT64_MAX)
return (int64)fp;
}
FP_TO_INT(float, uint64, f32, u64)
{
return (uint64)fp;
}
FP_TO_INT_SAT(float, uint64, f32, u64)
{
RETURN_IF_NANF(fp)
RETURN_IF_INF(fp, 0, UINT64_MAX)
RETURN_IF_MIN(fp, F32_I64_U_MIN, 0)
RETURN_IF_MAX(fp, F32_I64_U_MAX, UINT64_MAX)
return (uint64)fp;
}
FP_TO_INT_SAT(double, int64, f64, i64)
{
RETURN_IF_NAN(fp)
RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
RETURN_IF_MIN(fp, F64_I64_S_MIN, INT64_MIN)
RETURN_IF_MAX(fp, F64_I64_S_MAX, INT64_MAX)
return (int64)fp;
}
FP_TO_INT(double, uint64, f64, u64)
{
return (uint64)fp;
}
FP_TO_INT_SAT(double, uint64, f64, u64)
{
RETURN_IF_NAN(fp)
RETURN_IF_INF(fp, 0, UINT64_MAX)
RETURN_IF_MIN(fp, F64_I64_U_MIN, 0)
RETURN_IF_MAX(fp, F64_I64_U_MAX, UINT64_MAX)
return (uint64)fp;
}
INT_TO_FP(uint64, float, u64, f32)
{
return (float)i;
}
INT_TO_FP(uint64, double, u64, f64)
{
return (double)i;
}
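/*
 * u64_trunc_f32/f64 and f32/f64_convert_u64 are invoked through
 * jit_emit_callnative by the unsigned 64-bit conversion opcodes below,
 * rather than being emitted as inline IR instructions.
 */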
bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc)
{
JitReg num, res;
POP_I64(num);
res = jit_cc_new_reg_I32(cc);
GEN_INSN(I64TOI32, res, num);
PUSH_I32(res);
return true;
fail:
return false;
}
static bool
jit_compile_check_value_range(JitCompContext *cc, JitReg value, JitReg min_fp,
JitReg max_fp)
{
JitReg nan_ret = jit_cc_new_reg_I32(cc);
JitRegKind kind = jit_reg_kind(value);
bool emit_ret = false;
bh_assert(JIT_REG_KIND_F32 == kind || JIT_REG_KIND_F64 == kind);
/* If value is NaN, throw exception */
if (JIT_REG_KIND_F32 == kind)
emit_ret = jit_emit_callnative(cc, local_isnanf, nan_ret, &value, 1);
else
emit_ret = jit_emit_callnative(cc, local_isnan, nan_ret, &value, 1);
if (!emit_ret)
goto fail;
GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1));
if (!jit_emit_exception(cc, JIT_EXCE_INVALID_CONVERSION_TO_INTEGER,
JIT_OP_BEQ, cc->cmp_reg, NULL))
goto fail;
/* If value is out of integer range, throw exception */
GEN_INSN(CMP, cc->cmp_reg, min_fp, value);
if (!jit_emit_exception(cc, JIT_EXCE_INTEGER_OVERFLOW, JIT_OP_BGES,
cc->cmp_reg, NULL))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, value, max_fp);
if (!jit_emit_exception(cc, JIT_EXCE_INTEGER_OVERFLOW, JIT_OP_BGES,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat)
{
JitReg value, res;
POP_F32(value);
res = jit_cc_new_reg_I32(cc);
if (!sat) {
JitReg min_fp = NEW_CONST(F32, sign ? F32_I32_S_MIN : F32_I32_U_MIN);
JitReg max_fp = NEW_CONST(F32, sign ? F32_I32_S_MAX : F32_I32_U_MAX);
if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
goto fail;
if (sign)
GEN_INSN(F32TOI32, res, value);
else
GEN_INSN(F32TOU32, res, value);
}
else {
if (!jit_emit_callnative(cc,
sign ? (void *)i32_trunc_f32_sat
: (void *)u32_trunc_f32_sat,
res, &value, 1))
goto fail;
}
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat)
{
JitReg value, res;
POP_F64(value);
res = jit_cc_new_reg_I32(cc);
if (!sat) {
JitReg min_fp = NEW_CONST(F64, sign ? F64_I32_S_MIN : F64_I32_U_MIN);
JitReg max_fp = NEW_CONST(F64, sign ? F64_I32_S_MAX : F64_I32_U_MAX);
if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
goto fail;
if (sign)
GEN_INSN(F64TOI32, res, value);
else
GEN_INSN(F64TOU32, res, value);
}
else {
if (!jit_emit_callnative(cc,
sign ? (void *)i32_trunc_f64_sat
: (void *)u32_trunc_f64_sat,
res, &value, 1))
goto fail;
}
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_extend_i32(JitCompContext *cc, bool sign)
{
JitReg num, res;
POP_I32(num);
res = jit_cc_new_reg_I64(cc);
if (sign)
GEN_INSN(I32TOI64, res, num);
else
GEN_INSN(U32TOI64, res, num);
PUSH_I64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_extend_i64(JitCompContext *cc, int8 bitwidth)
{
JitReg value, tmp, res;
POP_I64(value);
tmp = jit_cc_new_reg_I32(cc);
res = jit_cc_new_reg_I64(cc);
switch (bitwidth) {
case 8:
{
GEN_INSN(I64TOI8, tmp, value);
GEN_INSN(I8TOI64, res, tmp);
break;
}
case 16:
{
GEN_INSN(I64TOI16, tmp, value);
GEN_INSN(I16TOI64, res, tmp);
break;
}
case 32:
{
GEN_INSN(I64TOI32, tmp, value);
GEN_INSN(I32TOI64, res, tmp);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
PUSH_I64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i32_extend_i32(JitCompContext *cc, int8 bitwidth)
{
JitReg value, tmp, res;
POP_I32(value);
tmp = jit_cc_new_reg_I32(cc);
res = jit_cc_new_reg_I32(cc);
switch (bitwidth) {
case 8:
{
GEN_INSN(I32TOI8, tmp, value);
GEN_INSN(I8TOI32, res, tmp);
break;
}
case 16:
{
GEN_INSN(I32TOI16, tmp, value);
GEN_INSN(I16TOI32, res, tmp);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat)
{
JitReg value, res;
POP_F32(value);
res = jit_cc_new_reg_I64(cc);
if (!sat) {
JitReg min_fp = NEW_CONST(F32, sign ? F32_I64_S_MIN : F32_I64_U_MIN);
JitReg max_fp = NEW_CONST(F32, sign ? F32_I64_S_MAX : F32_I64_U_MAX);
if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
goto fail;
if (sign) {
GEN_INSN(F32TOI64, res, value);
}
else {
if (!jit_emit_callnative(cc, u64_trunc_f32, res, &value, 1))
goto fail;
}
}
else {
if (!jit_emit_callnative(cc,
sign ? (void *)i64_trunc_f32_sat
: (void *)u64_trunc_f32_sat,
res, &value, 1))
goto fail;
}
PUSH_I64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat)
{
JitReg value, res;
POP_F64(value);
res = jit_cc_new_reg_I64(cc);
if (!sat) {
JitReg min_fp = NEW_CONST(F64, sign ? F64_I64_S_MIN : F64_I64_U_MIN);
JitReg max_fp = NEW_CONST(F64, sign ? F64_I64_S_MAX : F64_I64_U_MAX);
if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
goto fail;
if (sign) {
GEN_INSN(F64TOI64, res, value);
}
else {
if (!jit_emit_callnative(cc, u64_trunc_f64, res, &value, 1))
goto fail;
}
}
else {
if (!jit_emit_callnative(cc,
sign ? (void *)i64_trunc_f64_sat
: (void *)u64_trunc_f64_sat,
res, &value, 1))
goto fail;
}
PUSH_I64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_convert_i32(JitCompContext *cc, bool sign)
{
JitReg value, res;
POP_I32(value);
res = jit_cc_new_reg_F32(cc);
if (sign) {
GEN_INSN(I32TOF32, res, value);
}
else {
GEN_INSN(U32TOF32, res, value);
}
PUSH_F32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_convert_i64(JitCompContext *cc, bool sign)
{
JitReg value, res;
POP_I64(value);
res = jit_cc_new_reg_F32(cc);
if (sign) {
GEN_INSN(I64TOF32, res, value);
}
else {
if (!jit_emit_callnative(cc, f32_convert_u64, res, &value, 1)) {
goto fail;
}
}
PUSH_F32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_demote_f64(JitCompContext *cc)
{
JitReg value, res;
POP_F64(value);
res = jit_cc_new_reg_F32(cc);
GEN_INSN(F64TOF32, res, value);
PUSH_F32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_convert_i32(JitCompContext *cc, bool sign)
{
JitReg value, res;
POP_I32(value);
res = jit_cc_new_reg_F64(cc);
if (sign)
GEN_INSN(I32TOF64, res, value);
else
GEN_INSN(U32TOF64, res, value);
PUSH_F64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_convert_i64(JitCompContext *cc, bool sign)
{
JitReg value, res;
POP_I64(value);
res = jit_cc_new_reg_F64(cc);
if (sign) {
GEN_INSN(I64TOF64, res, value);
}
else {
if (!jit_emit_callnative(cc, f64_convert_u64, res, &value, 1)) {
goto fail;
}
}
PUSH_F64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_promote_f32(JitCompContext *cc)
{
JitReg value, res;
POP_F32(value);
res = jit_cc_new_reg_F64(cc);
GEN_INSN(F32TOF64, res, value);
PUSH_F64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *cc)
{
JitReg value, res;
POP_F64(value);
res = jit_cc_new_reg_I64(cc);
GEN_INSN(F64CASTI64, res, value);
PUSH_I64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *cc)
{
JitReg value, res;
POP_F32(value);
res = jit_cc_new_reg_I32(cc);
GEN_INSN(F32CASTI32, res, value);
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *cc)
{
JitReg value, res;
POP_I64(value);
res = jit_cc_new_reg_F64(cc);
GEN_INSN(I64CASTF64, res, value);
PUSH_F64(res);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *cc)
{
JitReg value, res;
POP_I32(value);
res = jit_cc_new_reg_F32(cc);
GEN_INSN(I32CASTF32, res, value);
PUSH_F32(res);
return true;
fail:
return false;
}


@ -0,0 +1,73 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONVERSION_H_
#define _JIT_EMIT_CONVERSION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc);
bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat);
bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat);
bool
jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign);
bool
jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth);
bool
jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth);
bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat);
bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat);
bool
jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign);
bool
jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign);
bool
jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx);
bool
jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign);
bool
jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign);
bool
jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx);
bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx);
bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx);
bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx);
bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_CONVERSION_H_ */


@ -0,0 +1,78 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
JitReg cond_br_if, JitBasicBlock *cond_br_else_block)
{
JitInsn *insn = NULL;
JitIncomingInsn *incoming_insn;
JitReg else_label;
bh_assert(exception_id < JIT_EXCE_NUM);
if (jit_opcode >= JIT_OP_BEQ && jit_opcode <= JIT_OP_BLEU) {
bh_assert(cond_br_if == cc->cmp_reg);
else_label =
cond_br_else_block ? jit_basic_block_label(cond_br_else_block) : 0;
switch (jit_opcode) {
case JIT_OP_BEQ:
insn = GEN_INSN(BEQ, cond_br_if, 0, else_label);
break;
case JIT_OP_BNE:
insn = GEN_INSN(BNE, cond_br_if, 0, else_label);
break;
case JIT_OP_BGTS:
insn = GEN_INSN(BGTS, cond_br_if, 0, else_label);
break;
case JIT_OP_BGES:
insn = GEN_INSN(BGES, cond_br_if, 0, else_label);
break;
case JIT_OP_BLTS:
insn = GEN_INSN(BLTS, cond_br_if, 0, else_label);
break;
case JIT_OP_BLES:
insn = GEN_INSN(BLES, cond_br_if, 0, else_label);
break;
case JIT_OP_BGTU:
insn = GEN_INSN(BGTU, cond_br_if, 0, else_label);
break;
case JIT_OP_BGEU:
insn = GEN_INSN(BGEU, cond_br_if, 0, else_label);
break;
case JIT_OP_BLTU:
insn = GEN_INSN(BLTU, cond_br_if, 0, else_label);
break;
case JIT_OP_BLEU:
insn = GEN_INSN(BLEU, cond_br_if, 0, else_label);
break;
}
if (!insn) {
jit_set_last_error(cc, "generate cond br insn failed");
return false;
}
}
else if (jit_opcode == JIT_OP_JMP) {
insn = GEN_INSN(JMP, 0);
if (!insn) {
jit_set_last_error(cc, "generate jmp insn failed");
return false;
}
}
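/*
 * Record the generated branch/jump in incoming_insns_for_exec_bbs so the
 * basic block that throws this exception can link these branches in later.
 */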
incoming_insn = jit_calloc(sizeof(JitIncomingInsn));
if (!incoming_insn) {
jit_set_last_error(cc, "allocate memory failed");
return false;
}
incoming_insn->insn = insn;
incoming_insn->next = cc->incoming_insns_for_exec_bbs[exception_id];
cc->incoming_insns_for_exec_bbs[exception_id] = incoming_insn;
return true;
}


@ -0,0 +1,23 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_EXCEPTION_H_
#define _JIT_EMIT_EXCEPTION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
JitReg cond_br_if, JitBasicBlock *cond_br_else_block);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_EXCEPTION_H_ */


@ -0,0 +1,535 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_function.h"
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
extern bool
jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
WASMInterpFrame *prev_frame);
/* Prepare parameters for the function to call */
static bool
pre_call(JitCompContext *cc, const WASMType *func_type)
{
JitReg value;
uint32 i, outs_off;
/* Prepare parameters for the function to call */
outs_off =
cc->total_frame_size + offsetof(WASMInterpFrame, lp)
+ wasm_get_cell_num(func_type->types, func_type->param_count) * 4;
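/*
 * The arguments are written just past the current frame, i.e. into the area
 * that becomes the callee frame's local (lp) area: outs_off starts at the
 * end of the callee's parameter cells and walks backwards as the operands
 * are popped in reverse order.
 */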
for (i = 0; i < func_type->param_count; i++) {
switch (func_type->types[func_type->param_count - 1 - i]) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
POP_I32(value);
outs_off -= 4;
GEN_INSN(STI32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
break;
case VALUE_TYPE_I64:
POP_I64(value);
outs_off -= 8;
GEN_INSN(STI64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
break;
case VALUE_TYPE_F32:
POP_F32(value);
outs_off -= 4;
GEN_INSN(STF32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
break;
case VALUE_TYPE_F64:
POP_F64(value);
outs_off -= 8;
GEN_INSN(STF64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
break;
default:
bh_assert(0);
goto fail;
}
}
/* Commit sp as the callee may use it to store the results */
gen_commit_sp_ip(cc->jit_frame);
return true;
fail:
return false;
}
/* Push results */
static bool
post_return(JitCompContext *cc, const WASMType *func_type, JitReg first_res)
{
uint32 i, n;
JitReg value;
n = cc->jit_frame->sp - cc->jit_frame->lp;
for (i = 0; i < func_type->result_count; i++) {
switch (func_type->types[func_type->param_count + i]) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
if (i == 0 && first_res) {
bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I32);
value = first_res;
}
else {
value = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, value, cc->fp_reg,
NEW_CONST(I32, offset_of_local(n)));
}
PUSH_I32(value);
n++;
break;
case VALUE_TYPE_I64:
if (i == 0 && first_res) {
bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I64);
value = first_res;
}
else {
value = jit_cc_new_reg_I64(cc);
GEN_INSN(LDI64, value, cc->fp_reg,
NEW_CONST(I32, offset_of_local(n)));
}
PUSH_I64(value);
n += 2;
break;
case VALUE_TYPE_F32:
if (i == 0 && first_res) {
bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F32);
value = first_res;
}
else {
value = jit_cc_new_reg_F32(cc);
GEN_INSN(LDF32, value, cc->fp_reg,
NEW_CONST(I32, offset_of_local(n)));
}
PUSH_F32(value);
n++;
break;
case VALUE_TYPE_F64:
if (i == 0 && first_res) {
bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F64);
value = first_res;
}
else {
value = jit_cc_new_reg_F64(cc);
GEN_INSN(LDF64, value, cc->fp_reg,
NEW_CONST(I32, offset_of_local(n)));
}
PUSH_F64(value);
n += 2;
break;
default:
bh_assert(0);
goto fail;
}
}
/* Update the committed_sp as the callee has updated the frame sp */
cc->jit_frame->committed_sp = cc->jit_frame->sp;
return true;
fail:
return false;
}
bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
{
WASMModule *wasm_module = cc->cur_wasm_module;
WASMFunctionImport *func_import;
WASMFunction *func;
WASMType *func_type;
JitFrame *jit_frame = cc->jit_frame;
JitReg native_ret;
JitReg fast_jit_func_ptrs, jitted_code = 0;
uint32 jitted_func_idx;
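/*
 * Two call paths: imported functions are executed through the native helper
 * jit_invoke_native, while functions defined in the module are called
 * directly via CALLBC using the jitted code pointer loaded from
 * fast_jit_func_ptrs.
 */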
if (func_idx >= wasm_module->import_function_count) {
fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
jitted_code = jit_cc_new_reg_ptr(cc);
/* jitted_code = func_ptrs[func_idx - import_function_count] */
jitted_func_idx = func_idx - wasm_module->import_function_count;
GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs,
NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
}
if (func_idx < wasm_module->import_function_count) {
func_import = &wasm_module->import_functions[func_idx].u.function;
func_type = func_import->func_type;
}
else {
func = wasm_module
->functions[func_idx - wasm_module->import_function_count];
func_type = func->func_type;
}
if (!pre_call(cc, func_type)) {
goto fail;
}
if (func_idx < wasm_module->import_function_count) {
JitReg arg_regs[3];
native_ret = jit_cc_new_reg_I32(cc);
arg_regs[0] = cc->exec_env_reg;
arg_regs[1] = NEW_CONST(I32, func_idx);
arg_regs[2] = cc->fp_reg;
if (!jit_emit_callnative(cc, jit_invoke_native, native_ret, arg_regs,
3)) {
return false;
}
/* Convert bool to uint32 */
GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));
/* Check whether there is exception thrown */
GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BEQ,
cc->cmp_reg, NULL)) {
return false;
}
if (!post_return(cc, func_type, 0)) {
goto fail;
}
}
else {
JitReg res = 0;
if (func_type->result_count > 0) {
switch (func_type->types[func_type->param_count]) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
res = jit_cc_new_reg_I32(cc);
break;
case VALUE_TYPE_I64:
res = jit_cc_new_reg_I64(cc);
break;
case VALUE_TYPE_F32:
res = jit_cc_new_reg_F32(cc);
break;
case VALUE_TYPE_F64:
res = jit_cc_new_reg_F64(cc);
break;
default:
bh_assert(0);
goto fail;
}
}
GEN_INSN(CALLBC, res, 0, jitted_code);
if (!post_return(cc, func_type, res)) {
goto fail;
}
}
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)
clear_memory_regs(jit_frame);
clear_table_regs(jit_frame);
/* Ignore tail call currently */
(void)tail_call;
return true;
fail:
return false;
}
static JitReg
pack_argv(JitCompContext *cc)
{
/* reuse the stack of the next frame */
uint32 stack_base;
JitReg argv;
stack_base = cc->total_frame_size + offsetof(WASMInterpFrame, lp);
argv = jit_cc_new_reg_ptr(cc);
GEN_INSN(ADD, argv, cc->fp_reg, NEW_CONST(PTR, stack_base));
return argv;
}
static bool
unpack_argv(JitCompContext *cc, const WASMType *func_type, JitReg argv)
{
uint32 i, offset_by_cell = 0;
JitReg value;
/* push results in argv to stack */
for (i = 0; i < func_type->result_count; i++) {
switch (func_type->types[func_type->param_count + i]) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
{
value = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, value, argv, NEW_CONST(I32, offset_by_cell));
PUSH_I32(value);
offset_by_cell += 4;
break;
}
case VALUE_TYPE_I64:
{
value = jit_cc_new_reg_I64(cc);
GEN_INSN(LDI64, value, argv, NEW_CONST(I32, offset_by_cell));
PUSH_I64(value);
offset_by_cell += 8;
break;
}
case VALUE_TYPE_F32:
{
value = jit_cc_new_reg_F32(cc);
GEN_INSN(LDF32, value, argv, NEW_CONST(I32, offset_by_cell));
PUSH_F32(value);
offset_by_cell += 4;
break;
}
case VALUE_TYPE_F64:
{
value = jit_cc_new_reg_F64(cc);
GEN_INSN(LDF64, value, argv, NEW_CONST(I32, offset_by_cell));
PUSH_F64(value);
offset_by_cell += 8;
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
}
/* Update the committed_sp as the callee has updated the frame sp */
cc->jit_frame->committed_sp = cc->jit_frame->sp;
return true;
fail:
return false;
}
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
uint32 tbl_idx)
{
JitReg elem_idx, native_ret, argv, arg_regs[6];
WASMType *func_type;
POP_I32(elem_idx);
func_type = cc->cur_wasm_module->types[type_idx];
if (!pre_call(cc, func_type)) {
goto fail;
}
argv = pack_argv(cc);
native_ret = jit_cc_new_reg_I32(cc);
arg_regs[0] = cc->exec_env_reg;
arg_regs[1] = NEW_CONST(I32, tbl_idx);
arg_regs[2] = elem_idx;
arg_regs[3] = NEW_CONST(I32, type_idx);
arg_regs[4] = NEW_CONST(I32, func_type->param_cell_num);
arg_regs[5] = argv;
if (!jit_emit_callnative(cc, jit_call_indirect, native_ret, arg_regs, 6)) {
return false;
}
/* Convert bool to uint32 */
GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));
/* Check whether there is exception thrown */
GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BEQ,
cc->cmp_reg, NULL)) {
return false;
}
if (!unpack_argv(cc, func_type, argv)) {
goto fail;
}
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)
clear_memory_regs(cc->jit_frame);
clear_table_regs(cc->jit_frame);
return true;
fail:
return false;
}
#if WASM_ENABLE_REF_TYPES != 0
bool
jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type)
{
PUSH_I32(NEW_CONST(I32, NULL_REF));
(void)ref_type;
return true;
fail:
return false;
}
bool
jit_compile_op_ref_is_null(JitCompContext *cc)
{
JitReg ref, res;
POP_I32(ref);
GEN_INSN(CMP, cc->cmp_reg, ref, NEW_CONST(I32, NULL_REF));
res = jit_cc_new_reg_I32(cc);
GEN_INSN(SELECTEQ, res, cc->cmp_reg, NEW_CONST(I32, 1), NEW_CONST(I32, 0));
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
{
PUSH_I32(NEW_CONST(I32, func_idx));
return true;
fail:
return false;
}
#endif
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
JitReg *params, uint32 param_count)
{
JitInsn *insn;
char *i64_arg_names[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
char *f32_arg_names[] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" };
char *f64_arg_names[] = { "xmm0_f64", "xmm1_f64", "xmm2_f64",
"xmm3_f64", "xmm4_f64", "xmm5_f64" };
JitReg i64_arg_regs[6], f32_arg_regs[6], f64_arg_regs[6], res_hreg = 0;
JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
JitReg xmm0_hreg = jit_codegen_get_hreg_by_name("xmm0");
JitReg xmm0_f64_hreg = jit_codegen_get_hreg_by_name("xmm0_f64");
uint32 i, i64_reg_idx, float_reg_idx;
bh_assert(param_count <= 6);
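/*
 * Register assignment follows the System V AMD64 calling convention:
 * integer/pointer arguments go in rdi, rsi, rdx, rcx, r8, r9 and
 * floating-point arguments in xmm0-xmm5; the return value comes back in
 * eax/rax or xmm0. This is why at most six parameters are supported here.
 */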
for (i = 0; i < 6; i++) {
i64_arg_regs[i] = jit_codegen_get_hreg_by_name(i64_arg_names[i]);
f32_arg_regs[i] = jit_codegen_get_hreg_by_name(f32_arg_names[i]);
f64_arg_regs[i] = jit_codegen_get_hreg_by_name(f64_arg_names[i]);
}
i64_reg_idx = float_reg_idx = 0;
for (i = 0; i < param_count; i++) {
switch (jit_reg_kind(params[i])) {
case JIT_REG_KIND_I32:
GEN_INSN(I32TOI64, i64_arg_regs[i64_reg_idx++], params[i]);
break;
case JIT_REG_KIND_I64:
GEN_INSN(MOV, i64_arg_regs[i64_reg_idx++], params[i]);
break;
case JIT_REG_KIND_F32:
GEN_INSN(MOV, f32_arg_regs[float_reg_idx++], params[i]);
break;
case JIT_REG_KIND_F64:
GEN_INSN(MOV, f64_arg_regs[float_reg_idx++], params[i]);
break;
default:
bh_assert(0);
return false;
}
}
if (res) {
switch (jit_reg_kind(res)) {
case JIT_REG_KIND_I32:
res_hreg = eax_hreg;
break;
case JIT_REG_KIND_I64:
res_hreg = rax_hreg;
break;
case JIT_REG_KIND_F32:
res_hreg = xmm0_hreg;
break;
case JIT_REG_KIND_F64:
res_hreg = xmm0_f64_hreg;
break;
default:
bh_assert(0);
return false;
}
}
insn = GEN_INSN(CALLNATIVE, res_hreg,
NEW_CONST(PTR, (uintptr_t)native_func), param_count);
if (!insn) {
return false;
}
i64_reg_idx = float_reg_idx = 0;
for (i = 0; i < param_count; i++) {
switch (jit_reg_kind(params[i])) {
case JIT_REG_KIND_I32:
case JIT_REG_KIND_I64:
*(jit_insn_opndv(insn, i + 2)) = i64_arg_regs[i64_reg_idx++];
break;
case JIT_REG_KIND_F32:
*(jit_insn_opndv(insn, i + 2)) = f32_arg_regs[float_reg_idx++];
break;
case JIT_REG_KIND_F64:
*(jit_insn_opndv(insn, i + 2)) = f64_arg_regs[float_reg_idx++];
break;
default:
bh_assert(0);
return false;
}
}
if (res) {
GEN_INSN(MOV, res, res_hreg);
}
return true;
}
#else
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
JitReg *params, uint32 param_count)
{
JitInsn *insn;
uint32 i;
bh_assert(param_count <= 6);
insn = GEN_INSN(CALLNATIVE, res, NEW_CONST(PTR, (uintptr_t)native_func),
param_count);
if (!insn)
return false;
for (i = 0; i < param_count; i++) {
*(jit_insn_opndv(insn, i + 2)) = params[i];
}
return true;
}
#endif


@ -0,0 +1,39 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_FUNCTION_H_
#define _JIT_EMIT_FUNCTION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call);
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
uint32 tbl_idx);
bool
jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type);
bool
jit_compile_op_ref_is_null(JitCompContext *cc);
bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx);
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
JitReg *params, uint32 param_count);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_FUNCTION_H_ */


@ -0,0 +1,782 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_memory.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
#ifndef OS_ENABLE_HW_BOUND_CHECK
static JitReg
get_memory_boundary(JitCompContext *cc, uint32 mem_idx, uint32 bytes)
{
JitReg memory_boundary;
switch (bytes) {
case 1:
{
memory_boundary =
get_mem_bound_check_1byte_reg(cc->jit_frame, mem_idx);
break;
}
case 2:
{
memory_boundary =
get_mem_bound_check_2bytes_reg(cc->jit_frame, mem_idx);
break;
}
case 4:
{
memory_boundary =
get_mem_bound_check_4bytes_reg(cc->jit_frame, mem_idx);
break;
}
case 8:
{
memory_boundary =
get_mem_bound_check_8bytes_reg(cc->jit_frame, mem_idx);
break;
}
case 16:
{
memory_boundary =
get_mem_bound_check_16bytes_reg(cc->jit_frame, mem_idx);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
return memory_boundary;
fail:
return 0;
}
#endif
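/*
 * When OS_ENABLE_HW_BOUND_CHECK is defined the explicit boundary compare
 * below is skipped; out-of-bounds accesses are then expected to be caught
 * by the hardware/OS bound check (e.g. guard pages) instead.
 */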
#if UINTPTR_MAX == UINT64_MAX
static JitReg
check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
JitReg memory_boundary)
{
JitReg long_addr, offset1;
/* long_addr = (int64_t)addr */
long_addr = jit_cc_new_reg_I64(cc);
GEN_INSN(U32TOI64, long_addr, addr);
/* offset1 = offset + long_addr */
offset1 = jit_cc_new_reg_I64(cc);
GEN_INSN(ADD, offset1, offset, long_addr);
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* if (offset1 > memory_boundary) goto EXCEPTION */
GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
JIT_OP_BGTU, cc->cmp_reg, NULL)) {
goto fail;
}
#endif
return offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
fail:
return 0;
#endif
}
#else
static JitReg
check_and_seek_on_32bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
JitReg memory_boundary)
{
JitReg offset1;
/* offset1 = offset + addr */
offset1 = jit_cc_new_reg_I32(cc);
GEN_INSN(ADD, offset1, offset, addr);
/* if (offset1 < addr) goto EXCEPTION */
GEN_INSN(CMP, cc->cmp_reg, offset1, addr);
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
JIT_OP_BLTU, cc->cmp_reg, NULL)) {
goto fail;
}
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* if (offset1 > memory_boundary) goto EXCEPTION */
GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
JIT_OP_BGTU, cc->cmp_reg, NULL)) {
goto fail;
}
#endif
return offset1;
fail:
return 0;
}
#endif
static JitReg
check_and_seek(JitCompContext *cc, JitReg addr, uint32 offset, uint32 bytes)
{
JitReg memory_boundary = 0, offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* the default memory */
uint32 mem_idx = 0;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* ---------- check ---------- */
/* 1. shortcut if the memory size is 0 */
if (0 == cc->cur_wasm_module->memories[mem_idx].init_page_count) {
JitReg memory_inst, cur_mem_page_count;
/* if (cur_mem_page_count == 0) goto EXCEPTION */
memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
cur_mem_page_count = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, cur_mem_page_count, memory_inst,
NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
GEN_INSN(CMP, cc->cmp_reg, cur_mem_page_count, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
JIT_OP_BEQ, cc->cmp_reg, NULL)) {
goto fail;
}
}
/* 2. a complete boundary check */
memory_boundary = get_memory_boundary(cc, mem_idx, bytes);
if (!memory_boundary)
goto fail;
#endif
#if UINTPTR_MAX == UINT64_MAX
offset1 = check_and_seek_on_64bit_platform(cc, addr, NEW_CONST(I64, offset),
memory_boundary);
if (!offset1)
goto fail;
#else
offset1 = check_and_seek_on_32bit_platform(cc, addr, NEW_CONST(I32, offset),
memory_boundary);
if (!offset1)
goto fail;
#endif
return offset1;
fail:
return 0;
}
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
value = jit_cc_new_reg_I32(cc);
switch (bytes) {
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
PUSH_I32(value);
return true;
fail:
return false;
}
bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
value = jit_cc_new_reg_I64(cc);
switch (bytes) {
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
case 8:
{
if (sign) {
GEN_INSN(LDI64, value, memory_data, offset1);
}
else {
GEN_INSN(LDU64, value, memory_data, offset1);
}
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
PUSH_I64(value);
return true;
fail:
return false;
}
bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset)
{
JitReg addr, offset1, value, memory_data;
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, 4);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
value = jit_cc_new_reg_F32(cc);
GEN_INSN(LDF32, value, memory_data, offset1);
PUSH_F32(value);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset)
{
JitReg addr, offset1, value, memory_data;
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, 8);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
value = jit_cc_new_reg_F64(cc);
GEN_INSN(LDF64, value, memory_data, offset1);
PUSH_F64(value);
return true;
fail:
return false;
}
bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
POP_I32(value);
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
return true;
fail:
return false;
}
bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
POP_I64(value);
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
if (jit_reg_is_const(value) && bytes < 8) {
value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
break;
}
case 8:
{
GEN_INSN(STI64, value, memory_data, offset1);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
return true;
fail:
return false;
}
bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset)
{
JitReg value, addr, offset1, memory_data;
POP_F32(value);
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, 4);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
GEN_INSN(STF32, value, memory_data, offset1);
return true;
fail:
return false;
}
bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset)
{
JitReg value, addr, offset1, memory_data;
POP_F64(value);
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, 8);
if (!offset1) {
goto fail;
}
memory_data = get_memory_data_reg(cc->jit_frame, 0);
GEN_INSN(STF64, value, memory_data, offset1);
return true;
fail:
return false;
}
bool
jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx)
{
JitReg mem_inst, res;
mem_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
res = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, res, mem_inst,
NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx)
{
JitReg memory_inst, grow_res, res;
JitReg prev_page_count, inc_page_count, args[2];
/* Get current page count */
memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
prev_page_count = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, prev_page_count, memory_inst,
NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
/* Call wasm_enlarge_memory */
POP_I32(inc_page_count);
grow_res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = inc_page_count;
if (!jit_emit_callnative(cc, wasm_enlarge_memory, grow_res, args, 2)) {
goto fail;
}
/* Convert bool to uint32 */
GEN_INSN(AND, grow_res, grow_res, NEW_CONST(I32, 0xFF));
/* return different values according to memory.grow result */
res = jit_cc_new_reg_I32(cc);
GEN_INSN(CMP, cc->cmp_reg, grow_res, NEW_CONST(I32, 0));
GEN_INSN(SELECTNE, res, cc->cmp_reg, prev_page_count,
NEW_CONST(I32, (int32)-1));
PUSH_I32(res);
/* Ensure a refresh in next get memory related registers */
clear_memory_regs(cc->jit_frame);
return true;
fail:
return false;
}
#if WASM_ENABLE_BULK_MEMORY != 0
static int
wasm_init_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 seg_idx,
uint32 len, uint32 mem_offset, uint32 data_offset)
{
WASMMemoryInstance *mem_inst;
WASMDataSeg *data_segment;
uint32 mem_size;
uint8 *mem_addr, *data_addr;
/* if d + n > the length of mem.data */
mem_inst = inst->memories[mem_idx];
mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
if (mem_size < mem_offset || mem_size - mem_offset < len)
goto out_of_bounds;
/* if s + n > the length of data.data */
bh_assert(seg_idx < inst->module->data_seg_count);
data_segment = inst->module->data_segments[seg_idx];
if (data_segment->data_length < data_offset
|| data_segment->data_length - data_offset < len)
goto out_of_bounds;
mem_addr = mem_inst->memory_data + mem_offset;
data_addr = data_segment->data + data_offset;
bh_memcpy_s(mem_addr, mem_size - mem_offset, data_addr, len);
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds memory access");
return -1;
}
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx)
{
JitReg len, mem_offset, data_offset, res;
JitReg args[6] = { 0 };
POP_I32(len);
POP_I32(data_offset);
POP_I32(mem_offset);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, mem_idx);
args[2] = NEW_CONST(I32, seg_idx);
args[3] = len;
args[4] = mem_offset;
args[5] = data_offset;
if (!jit_emit_callnative(cc, wasm_init_memory, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx)
{
JitReg module = get_module_reg(cc->jit_frame);
JitReg data_segments = jit_cc_new_reg_ptr(cc);
JitReg data_segment = jit_cc_new_reg_ptr(cc);
GEN_INSN(LDPTR, data_segments, module,
NEW_CONST(I32, offsetof(WASMModule, data_segments)));
GEN_INSN(LDPTR, data_segment, data_segments,
NEW_CONST(I32, seg_idx * sizeof(WASMDataSeg *)));
GEN_INSN(STI32, NEW_CONST(I32, 0), data_segment,
NEW_CONST(I32, offsetof(WASMDataSeg, data_length)));
return true;
}
static int
wasm_copy_memory(WASMModuleInstance *inst, uint32 src_mem_idx,
uint32 dst_mem_idx, uint32 len, uint32 src_offset,
uint32 dst_offset)
{
WASMMemoryInstance *src_mem, *dst_mem;
uint32 src_mem_size, dst_mem_size;
uint8 *src_addr, *dst_addr;
src_mem = inst->memories[src_mem_idx];
dst_mem = inst->memories[dst_mem_idx];
src_mem_size = src_mem->cur_page_count * src_mem->num_bytes_per_page;
dst_mem_size = dst_mem->cur_page_count * dst_mem->num_bytes_per_page;
/* if s + n > the length of mem.data */
if (src_mem_size < src_offset || src_mem_size - src_offset < len)
goto out_of_bounds;
/* if d + n > the length of mem.data */
if (dst_mem_size < dst_offset || dst_mem_size - dst_offset < len)
goto out_of_bounds;
src_addr = src_mem->memory_data + src_offset;
dst_addr = dst_mem->memory_data + dst_offset;
/* allowing the destination and source to overlap */
bh_memmove_s(dst_addr, dst_mem_size - dst_offset, src_addr, len);
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds memory access");
return -1;
}
bool
jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
uint32 dst_mem_idx)
{
JitReg len, src, dst, res;
JitReg args[6] = { 0 };
POP_I32(len);
POP_I32(src);
POP_I32(dst);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, src_mem_idx);
args[2] = NEW_CONST(I32, dst_mem_idx);
args[3] = len;
args[4] = src;
args[5] = dst;
if (!jit_emit_callnative(cc, wasm_copy_memory, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
static int
wasm_fill_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 len,
uint32 val, uint32 dst)
{
WASMMemoryInstance *mem_inst;
uint32 mem_size;
uint8 *dst_addr;
mem_inst = inst->memories[mem_idx];
mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
if (mem_size < dst || mem_size - dst < len)
goto out_of_bounds;
dst_addr = mem_inst->memory_data + dst;
memset(dst_addr, val, len);
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds memory access");
return -1;
}
bool
jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx)
{
JitReg res, len, val, dst;
JitReg args[5] = { 0 };
POP_I32(len);
POP_I32(val);
POP_I32(dst);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, mem_idx);
args[2] = len;
args[3] = val;
args[4] = dst;
if (!jit_emit_callnative(cc, wasm_fill_memory, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes)
{
return false;
}
bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
return false;
}
bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
return false;
}
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes)
{
return false;
}
#endif


@ -0,0 +1,89 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_MEMORY_H_
#define _JIT_EMIT_MEMORY_H_
#include "../jit_compiler.h"
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "../../common/wasm_shared_memory.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic);
bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic);
bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset);
bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset);
bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic);
bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic);
bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset);
bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset);
bool
jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx);
bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx);
#if WASM_ENABLE_BULK_MEMORY != 0
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx);
bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx);
bool
jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
uint32 dst_mem_idx);
bool
jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx);
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes);
bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes);
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_MEMORY_H_ */

File diff suppressed because it is too large


@ -0,0 +1,76 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_NUMBERIC_H_
#define _JIT_EMIT_NUMBERIC_H_
#include "../jit_compiler.h"
#include "../jit_frontend.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_i32_clz(JitCompContext *cc);
bool
jit_compile_op_i32_ctz(JitCompContext *cc);
bool
jit_compile_op_i32_popcnt(JitCompContext *cc);
bool
jit_compile_op_i64_clz(JitCompContext *cc);
bool
jit_compile_op_i64_ctz(JitCompContext *cc);
bool
jit_compile_op_i64_popcnt(JitCompContext *cc);
bool
jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
uint8 **p_frame_ip);
bool
jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
uint8 **p_frame_ip);
bool
jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op);
bool
jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op);
bool
jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op);
bool
jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op);
bool
jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op);
bool
jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op);
bool
jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);
bool
jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);
bool
jit_compile_op_f32_copysign(JitCompContext *cc);
bool
jit_compile_op_f64_copysign(JitCompContext *cc);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_NUMBERIC_H_ */


@ -0,0 +1,130 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_parametric.h"
#include "../jit_frontend.h"
static bool
pop_value_from_wasm_stack(JitCompContext *cc, bool is_32bit, JitReg *p_value,
uint8 *p_type)
{
JitValue *jit_value;
JitReg value;
uint8 type;
if (!jit_block_stack_top(&cc->block_stack)) {
jit_set_last_error(cc, "WASM block stack underflow.");
return false;
}
if (!jit_block_stack_top(&cc->block_stack)->value_stack.value_list_end) {
jit_set_last_error(cc, "WASM data stack underflow.");
return false;
}
jit_value = jit_value_stack_pop(
&jit_block_stack_top(&cc->block_stack)->value_stack);
type = jit_value->type;
if (p_type != NULL) {
*p_type = jit_value->type;
}
wasm_runtime_free(jit_value);
/* is_32: i32, f32, ref.func, ref.extern, v128 */
if (is_32bit
&& !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32
#if WASM_ENABLE_REF_TYPES != 0
|| type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF
#endif
|| type == VALUE_TYPE_V128)) {
jit_set_last_error(cc, "invalid WASM stack data type.");
return false;
}
/* !is_32: i64, f64 */
if (!is_32bit && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) {
jit_set_last_error(cc, "invalid WASM stack data type.");
return false;
}
switch (type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_FUNCREF:
case VALUE_TYPE_EXTERNREF:
#endif
value = pop_i32(cc->jit_frame);
break;
case VALUE_TYPE_I64:
value = pop_i64(cc->jit_frame);
break;
case VALUE_TYPE_F32:
value = pop_f32(cc->jit_frame);
break;
case VALUE_TYPE_F64:
value = pop_f64(cc->jit_frame);
break;
default:
bh_assert(0);
return false;
}
if (p_value != NULL) {
*p_value = value;
}
return true;
}
bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32)
{
if (!pop_value_from_wasm_stack(cc, is_drop_32, NULL, NULL))
return false;
return true;
}
bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32)
{
JitReg val1, val2, cond, selected;
uint8 val1_type, val2_type;
POP_I32(cond);
if (!pop_value_from_wasm_stack(cc, is_select_32, &val2, &val2_type)
|| !pop_value_from_wasm_stack(cc, is_select_32, &val1, &val1_type)) {
return false;
}
if (val1_type != val2_type) {
jit_set_last_error(cc, "invalid stack values with different type");
return false;
}
switch (val1_type) {
case VALUE_TYPE_I32:
selected = jit_cc_new_reg_I32(cc);
break;
case VALUE_TYPE_I64:
selected = jit_cc_new_reg_I64(cc);
break;
case VALUE_TYPE_F32:
selected = jit_cc_new_reg_F32(cc);
break;
case VALUE_TYPE_F64:
selected = jit_cc_new_reg_F64(cc);
break;
default:
bh_assert(0);
return false;
}
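/* Wasm select semantics: keep val1 when cond != 0, otherwise val2 */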
GEN_INSN(CMP, cc->cmp_reg, cond, NEW_CONST(I32, 0));
GEN_INSN(SELECTNE, selected, cc->cmp_reg, val1, val2);
PUSH(selected, val1_type);
return true;
fail:
return false;
}


@ -0,0 +1,25 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_PARAMETRIC_H_
#define _JIT_EMIT_PARAMETRIC_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32);
bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_PARAMETRIC_H_ */


@ -0,0 +1,318 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_table.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../../interpreter/wasm_runtime.h"
#include "../jit_frontend.h"
#if WASM_ENABLE_REF_TYPES != 0
bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx)
{
JitReg module, tbl_segs;
module = get_module_reg(cc->jit_frame);
tbl_segs = jit_cc_new_reg_ptr(cc);
GEN_INSN(LDPTR, tbl_segs, module,
NEW_CONST(I32, offsetof(WASMModule, table_segments)));
GEN_INSN(STI32, NEW_CONST(I32, true), tbl_segs,
NEW_CONST(I32, tbl_seg_idx * sizeof(WASMTableSeg)
+ offsetof(WASMTableSeg, is_dropped)));
return true;
}
bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx)
{
JitReg elem_idx, tbl_sz, tbl_data, elem_idx_long, offset, res;
POP_I32(elem_idx);
/* if (elem_idx >= tbl_sz) goto exception; */
tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
JIT_OP_BGEU, cc->cmp_reg, NULL))
goto fail;
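/*
 * Table elements are stored as 32-bit function indexes, so the byte
 * offset into the table data is elem_idx * sizeof(uint32).
 */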
elem_idx_long = jit_cc_new_reg_I64(cc);
GEN_INSN(I32TOI64, elem_idx_long, elem_idx);
offset = jit_cc_new_reg_I64(cc);
GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));
res = jit_cc_new_reg_I32(cc);
tbl_data = get_table_data_reg(cc->jit_frame, tbl_idx);
GEN_INSN(LDI32, res, tbl_data, offset);
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx)
{
JitReg elem_idx, elem_val, tbl_sz, tbl_data, elem_idx_long, offset;
POP_I32(elem_val);
POP_I32(elem_idx);
/* if (elem_idx >= tbl_sz) goto exception; */
tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
JIT_OP_BGEU, cc->cmp_reg, NULL))
goto fail;
elem_idx_long = jit_cc_new_reg_I64(cc);
GEN_INSN(I32TOI64, elem_idx_long, elem_idx);
offset = jit_cc_new_reg_I64(cc);
GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));
tbl_data = get_table_data_reg(cc->jit_frame, tbl_idx);
GEN_INSN(STI32, elem_val, tbl_data, offset);
return true;
fail:
return false;
}
static int
wasm_init_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 elem_idx,
uint32 dst, uint32 len, uint32 src)
{
WASMTableInstance *tbl;
uint32 tbl_sz;
WASMTableSeg *elem;
uint32 elem_len;
tbl = inst->tables[tbl_idx];
tbl_sz = tbl->cur_size;
if (dst > tbl_sz || tbl_sz - dst < len)
goto out_of_bounds;
elem = inst->module->table_segments + elem_idx;
elem_len = elem->function_count;
if (src > elem_len || elem_len - src < len)
goto out_of_bounds;
bh_memcpy_s((uint8 *)(tbl) + offsetof(WASMTableInstance, base_addr)
+ dst * sizeof(uint32),
(uint32)((tbl_sz - dst) * sizeof(uint32)),
elem->func_indexes + src, (uint32)(len * sizeof(uint32)));
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds table access");
return -1;
}
bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
uint32 tbl_seg_idx)
{
JitReg len, src, dst, res;
JitReg args[6] = { 0 };
POP_I32(len);
POP_I32(src);
POP_I32(dst);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, tbl_idx);
args[2] = NEW_CONST(I32, tbl_seg_idx);
args[3] = dst;
args[4] = len;
args[5] = src;
if (!jit_emit_callnative(cc, wasm_init_table, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
static int
wasm_copy_table(WASMModuleInstance *inst, uint32 src_tbl_idx,
uint32 dst_tbl_idx, uint32 dst_offset, uint32 len,
uint32 src_offset)
{
WASMTableInstance *src_tbl, *dst_tbl;
uint32 src_tbl_sz, dst_tbl_sz;
src_tbl = inst->tables[src_tbl_idx];
src_tbl_sz = src_tbl->cur_size;
if (src_offset > src_tbl_sz || src_tbl_sz - src_offset < len)
goto out_of_bounds;
dst_tbl = inst->tables[dst_tbl_idx];
dst_tbl_sz = dst_tbl->cur_size;
if (dst_offset > dst_tbl_sz || dst_tbl_sz - dst_offset < len)
goto out_of_bounds;
bh_memmove_s((uint8 *)(dst_tbl) + offsetof(WASMTableInstance, base_addr)
+ dst_offset * sizeof(uint32),
(uint32)((dst_tbl_sz - dst_offset) * sizeof(uint32)),
(uint8 *)(src_tbl) + offsetof(WASMTableInstance, base_addr)
+ src_offset * sizeof(uint32),
(uint32)(len * sizeof(uint32)));
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds table access");
return -1;
}
bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
uint32 dst_tbl_idx)
{
JitReg len, src, dst, res;
JitReg args[6] = { 0 };
POP_I32(len);
POP_I32(src);
POP_I32(dst);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, src_tbl_idx);
args[2] = NEW_CONST(I32, dst_tbl_idx);
args[3] = dst;
args[4] = len;
args[5] = src;
if (!jit_emit_callnative(cc, wasm_copy_table, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx)
{
JitReg res;
res = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
PUSH_I32(res);
return true;
fail:
return false;
}
bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx)
{
JitReg tbl_sz, n, val, enlarge_ret, res;
JitReg args[4] = { 0 };
POP_I32(n);
POP_I32(val);
tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
enlarge_ret = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, tbl_idx);
args[2] = n;
args[3] = val;
if (!jit_emit_callnative(cc, wasm_enlarge_table, enlarge_ret, args,
sizeof(args) / sizeof(args[0])))
goto fail;
/* Convert bool to uint32 */
GEN_INSN(AND, enlarge_ret, enlarge_ret, NEW_CONST(I32, 0xFF));
res = jit_cc_new_reg_I32(cc);
GEN_INSN(CMP, cc->cmp_reg, enlarge_ret, NEW_CONST(I32, 1));
GEN_INSN(SELECTEQ, res, cc->cmp_reg, tbl_sz, NEW_CONST(I32, -1));
PUSH_I32(res);
    /* Ensure the cached table registers are refreshed the next time
       they are retrieved */
clear_table_regs(cc->jit_frame);
return true;
fail:
return false;
}
static int
wasm_fill_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 dst,
uint32 val, uint32 len)
{
WASMTableInstance *tbl;
uint32 tbl_sz;
tbl = inst->tables[tbl_idx];
tbl_sz = tbl->cur_size;
if (dst > tbl_sz || tbl_sz - dst < len)
goto out_of_bounds;
for (; len != 0; dst++, len--) {
((uint32 *)(tbl->base_addr))[dst] = val;
}
return 0;
out_of_bounds:
wasm_set_exception(inst, "out of bounds table access");
return -1;
}
bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx)
{
JitReg len, val, dst, res;
JitReg args[5] = { 0 };
POP_I32(len);
POP_I32(val);
POP_I32(dst);
res = jit_cc_new_reg_I32(cc);
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = NEW_CONST(I32, tbl_idx);
args[2] = dst;
args[3] = val;
args[4] = len;
if (!jit_emit_callnative(cc, wasm_fill_table, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
cc->cmp_reg, NULL))
goto fail;
return true;
fail:
return false;
}
#endif


@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_TABLE_H_
#define _JIT_EMIT_TABLE_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
#if WASM_ENABLE_REF_TYPES != 0
bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx);
bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx);
bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx);
bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
uint32 tbl_seg_idx);
bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
uint32 dst_tbl_idx);
bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx);
bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx);
bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx);
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif


@ -0,0 +1,323 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_variable.h"
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
#define CHECK_LOCAL(idx) \
do { \
if (idx \
>= wasm_func->func_type->param_count + wasm_func->local_count) { \
jit_set_last_error(cc, "local index out of range"); \
goto fail; \
} \
} while (0)
static uint8
get_local_type(const WASMFunction *wasm_func, uint32 local_idx)
{
uint32 param_count = wasm_func->func_type->param_count;
return local_idx < param_count
? wasm_func->func_type->types[local_idx]
: wasm_func->local_types[local_idx - param_count];
}
bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx)
{
WASMFunction *wasm_func = cc->cur_wasm_func;
uint16 *local_offsets = wasm_func->local_offsets;
uint16 local_offset;
uint8 local_type;
JitReg value = 0;
CHECK_LOCAL(local_idx);
local_offset = local_offsets[local_idx];
local_type = get_local_type(wasm_func, local_idx);
switch (local_type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
value = local_i32(cc->jit_frame, local_offset);
break;
case VALUE_TYPE_I64:
value = local_i64(cc->jit_frame, local_offset);
break;
case VALUE_TYPE_F32:
value = local_f32(cc->jit_frame, local_offset);
break;
case VALUE_TYPE_F64:
value = local_f64(cc->jit_frame, local_offset);
break;
default:
bh_assert(0);
break;
}
PUSH(value, local_type);
return true;
fail:
return false;
}
bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx)
{
WASMFunction *wasm_func = cc->cur_wasm_func;
uint16 *local_offsets = wasm_func->local_offsets;
uint16 local_offset;
uint8 local_type;
JitReg value;
CHECK_LOCAL(local_idx);
local_offset = local_offsets[local_idx];
local_type = get_local_type(wasm_func, local_idx);
switch (local_type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
POP_I32(value);
set_local_i32(cc->jit_frame, local_offset, value);
break;
case VALUE_TYPE_I64:
POP_I64(value);
set_local_i64(cc->jit_frame, local_offset, value);
break;
case VALUE_TYPE_F32:
POP_F32(value);
set_local_f32(cc->jit_frame, local_offset, value);
break;
case VALUE_TYPE_F64:
POP_F64(value);
set_local_f64(cc->jit_frame, local_offset, value);
break;
default:
bh_assert(0);
break;
}
return true;
fail:
return false;
}
bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx)
{
WASMFunction *wasm_func = cc->cur_wasm_func;
uint16 *local_offsets = wasm_func->local_offsets;
uint16 local_offset;
uint8 local_type;
JitReg value = 0;
CHECK_LOCAL(local_idx);
local_offset = local_offsets[local_idx];
local_type = get_local_type(wasm_func, local_idx);
switch (local_type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
POP_I32(value);
set_local_i32(cc->jit_frame, local_offset, value);
PUSH_I32(value);
break;
case VALUE_TYPE_I64:
POP_I64(value);
set_local_i64(cc->jit_frame, local_offset, value);
PUSH_I64(value);
break;
case VALUE_TYPE_F32:
POP_F32(value);
set_local_f32(cc->jit_frame, local_offset, value);
PUSH_F32(value);
break;
case VALUE_TYPE_F64:
POP_F64(value);
set_local_f64(cc->jit_frame, local_offset, value);
PUSH_F64(value);
break;
default:
bh_assert(0);
goto fail;
}
return true;
fail:
return false;
}
static uint8
get_global_type(const WASMModule *module, uint32 global_idx)
{
if (global_idx < module->import_global_count) {
const WASMGlobalImport *import_global =
&((module->import_globals + global_idx)->u.global);
return import_global->type;
}
else {
const WASMGlobal *global =
module->globals + (global_idx - module->import_global_count);
return global->type;
}
}
static uint32
get_global_data_offset(const WASMModule *module, uint32 global_idx)
{
if (global_idx < module->import_global_count) {
const WASMGlobalImport *import_global =
&((module->import_globals + global_idx)->u.global);
return import_global->data_offset;
}
else {
const WASMGlobal *global =
module->globals + (global_idx - module->import_global_count);
return global->data_offset;
}
}
bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx)
{
uint32 data_offset;
uint8 global_type = 0;
JitReg value = 0;
bh_assert(global_idx < cc->cur_wasm_module->import_global_count
+ cc->cur_wasm_module->global_count);
data_offset = get_global_data_offset(cc->cur_wasm_module, global_idx);
global_type = get_global_type(cc->cur_wasm_module, global_idx);
switch (global_type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
{
value = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_I64:
{
value = jit_cc_new_reg_I64(cc);
GEN_INSN(LDI64, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_F32:
{
value = jit_cc_new_reg_F32(cc);
GEN_INSN(LDF32, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_F64:
{
value = jit_cc_new_reg_F64(cc);
GEN_INSN(LDF64, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
default:
{
jit_set_last_error(cc, "unexpected global type");
goto fail;
}
}
PUSH(value, global_type);
return true;
fail:
return false;
}
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
bool is_aux_stack)
{
uint32 data_offset;
uint8 global_type = 0;
JitReg value = 0;
bh_assert(global_idx < cc->cur_wasm_module->import_global_count
+ cc->cur_wasm_module->global_count);
data_offset = get_global_data_offset(cc->cur_wasm_module, global_idx);
global_type = get_global_type(cc->cur_wasm_module, global_idx);
switch (global_type) {
case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
case VALUE_TYPE_EXTERNREF:
case VALUE_TYPE_FUNCREF:
#endif
{
POP_I32(value);
if (is_aux_stack) {
JitReg aux_stack_bound = get_aux_stack_bound_reg(cc->jit_frame);
JitReg aux_stack_bottom =
get_aux_stack_bottom_reg(cc->jit_frame);
GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bound);
if (!(jit_emit_exception(cc, JIT_EXCE_AUX_STACK_OVERFLOW,
JIT_OP_BLEU, cc->cmp_reg, NULL)))
goto fail;
GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bottom);
if (!(jit_emit_exception(cc, JIT_EXCE_AUX_STACK_UNDERFLOW,
JIT_OP_BGTU, cc->cmp_reg, NULL)))
goto fail;
}
GEN_INSN(STI32, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_I64:
{
POP_I64(value);
GEN_INSN(STI64, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_F32:
{
POP_F32(value);
GEN_INSN(STF32, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
case VALUE_TYPE_F64:
{
POP_F64(value);
GEN_INSN(STF64, value, get_global_data_reg(cc->jit_frame),
NEW_CONST(I32, data_offset));
break;
}
default:
{
jit_set_last_error(cc, "unexpected global type");
goto fail;
}
}
return true;
fail:
return false;
}


@ -0,0 +1,35 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_VARIABLE_H_
#define _JIT_EMIT_VARIABLE_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx);
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
bool is_aux_stack);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_VARIABLE_H_ */


@ -0,0 +1,95 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set (IWASM_FAST_JIT_DIR ${CMAKE_CURRENT_LIST_DIR})
add_definitions(-DWASM_ENABLE_FAST_JIT=1)
if (WAMR_BUILD_FAST_JIT_DUMP EQUAL 1)
add_definitions(-DWASM_ENABLE_FAST_JIT_DUMP=1)
endif ()
include_directories (${IWASM_FAST_JIT_DIR})
if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
include(FetchContent)
if (NOT WAMR_BUILD_PLATFORM STREQUAL "linux-sgx")
FetchContent_Declare(
asmjit
GIT_REPOSITORY https://github.com/asmjit/asmjit.git
)
else ()
FetchContent_Declare(
asmjit
GIT_REPOSITORY https://github.com/asmjit/asmjit.git
PATCH_COMMAND git apply ${IWASM_FAST_JIT_DIR}/asmjit_sgx_patch.diff
)
endif ()
FetchContent_GetProperties(asmjit)
if (NOT asmjit_POPULATED)
message ("-- Fetching asmjit ..")
FetchContent_Populate(asmjit)
add_definitions(-DASMJIT_STATIC)
add_definitions(-DASMJIT_NO_DEPRECATED)
add_definitions(-DASMJIT_NO_BUILDER)
add_definitions(-DASMJIT_NO_COMPILER)
add_definitions(-DASMJIT_NO_JIT)
add_definitions(-DASMJIT_NO_LOGGING)
add_definitions(-DASMJIT_NO_TEXT)
add_definitions(-DASMJIT_NO_VALIDATION)
add_definitions(-DASMJIT_NO_INTROSPECTION)
add_definitions(-DASMJIT_NO_INTRINSICS)
add_definitions(-DASMJIT_NO_AARCH64)
add_definitions(-DASMJIT_NO_AARCH32)
include_directories("${asmjit_SOURCE_DIR}/src")
add_subdirectory(${asmjit_SOURCE_DIR} ${asmjit_BINARY_DIR} EXCLUDE_FROM_ALL)
file (GLOB_RECURSE cpp_source_asmjit
${asmjit_SOURCE_DIR}/src/asmjit/core/*.cpp
${asmjit_SOURCE_DIR}/src/asmjit/x86/*.cpp
)
endif ()
if (WAMR_BUILD_FAST_JIT_DUMP EQUAL 1)
FetchContent_Declare(
zycore
GIT_REPOSITORY https://github.com/zyantific/zycore-c.git
)
FetchContent_GetProperties(zycore)
if (NOT zycore_POPULATED)
message ("-- Fetching zycore ..")
FetchContent_Populate(zycore)
option(ZYDIS_BUILD_TOOLS "" OFF)
option(ZYDIS_BUILD_EXAMPLES "" OFF)
include_directories("${zycore_SOURCE_DIR}/include")
include_directories("${zycore_BINARY_DIR}")
add_subdirectory(${zycore_SOURCE_DIR} ${zycore_BINARY_DIR} EXCLUDE_FROM_ALL)
file (GLOB_RECURSE c_source_zycore ${zycore_SOURCE_DIR}/src/*.c)
endif ()
FetchContent_Declare(
zydis
GIT_REPOSITORY https://github.com/zyantific/zydis.git
GIT_TAG e14a07895136182a5b53e181eec3b1c6e0b434de
)
FetchContent_GetProperties(zydis)
if (NOT zydis_POPULATED)
message ("-- Fetching zydis ..")
FetchContent_Populate(zydis)
option(ZYDIS_BUILD_TOOLS "" OFF)
option(ZYDIS_BUILD_EXAMPLES "" OFF)
include_directories("${zydis_BINARY_DIR}")
include_directories("${zydis_SOURCE_DIR}/include")
include_directories("${zydis_SOURCE_DIR}/src")
add_subdirectory(${zydis_SOURCE_DIR} ${zydis_BINARY_DIR} EXCLUDE_FROM_ALL)
file (GLOB_RECURSE c_source_zydis ${zydis_SOURCE_DIR}/src/*.c)
endif ()
endif ()
endif ()
file (GLOB c_source_jit ${IWASM_FAST_JIT_DIR}/*.c ${IWASM_FAST_JIT_DIR}/fe/*.c)
if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
file (GLOB_RECURSE cpp_source_jit_cg ${IWASM_FAST_JIT_DIR}/cg/x86-64/*.cpp)
else ()
message (FATAL_ERROR "Fast JIT codegen for target ${WAMR_BUILD_TARGET} isn't implemented")
endif ()
set (IWASM_FAST_JIT_SOURCE ${c_source_jit} ${cpp_source_jit_cg}
${cpp_source_asmjit} ${c_source_zycore} ${c_source_zydis})


@ -0,0 +1,65 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_codecache.h"
#include "mem_alloc.h"
#include "jit_compiler.h"
static void *code_cache_pool = NULL;
static uint32 code_cache_pool_size = 0;
static mem_allocator_t code_cache_pool_allocator = NULL;
bool
jit_code_cache_init(uint32 code_cache_size)
{
int map_prot = MMAP_PROT_READ | MMAP_PROT_WRITE | MMAP_PROT_EXEC;
int map_flags = MMAP_MAP_NONE;
if (!(code_cache_pool =
os_mmap(NULL, code_cache_size, map_prot, map_flags))) {
return false;
}
if (!(code_cache_pool_allocator =
mem_allocator_create(code_cache_pool, code_cache_size))) {
os_munmap(code_cache_pool, code_cache_size);
code_cache_pool = NULL;
return false;
}
code_cache_pool_size = code_cache_size;
return true;
}
void
jit_code_cache_destroy()
{
mem_allocator_destroy(code_cache_pool_allocator);
os_munmap(code_cache_pool, code_cache_pool_size);
}
void *
jit_code_cache_alloc(uint32 size)
{
return mem_allocator_malloc(code_cache_pool_allocator, size);
}
void
jit_code_cache_free(void *ptr)
{
if (ptr)
mem_allocator_free(code_cache_pool_allocator, ptr);
}
bool
jit_pass_register_jitted_code(JitCompContext *cc)
{
uint32 jit_func_idx =
cc->cur_wasm_func_idx - cc->cur_wasm_module->import_function_count;
cc->cur_wasm_func->fast_jit_jitted_code = cc->jitted_addr_begin;
cc->cur_wasm_module->fast_jit_func_ptrs[jit_func_idx] =
cc->jitted_addr_begin;
return true;
}


@ -0,0 +1,31 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODE_CACHE_H_
#define _JIT_CODE_CACHE_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_code_cache_init(uint32 code_cache_size);
void
jit_code_cache_destroy();
void *
jit_code_cache_alloc(uint32 size);
void
jit_code_cache_free(void *ptr);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_CODE_CACHE_H_ */
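A minimal usage sketch of the code cache API above, assuming a 16 MB cache;
the size and the code_cache_demo() wrapper are illustrative assumptions, not
part of the runtime.

#include "jit_codecache.h"

/* Hypothetical driver showing the expected call order. */
static bool
code_cache_demo(void)
{
    void *block;

    if (!jit_code_cache_init(16 * 1024 * 1024)) /* create the executable pool */
        return false;

    if ((block = jit_code_cache_alloc(4096))) { /* room for one jitted function */
        /* ... emit native code into block ... */
        jit_code_cache_free(block);
    }

    jit_code_cache_destroy(); /* unmap the whole pool */
    return true;
}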


@ -0,0 +1,22 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_codegen.h"
bool
jit_pass_lower_cg(JitCompContext *cc)
{
return jit_codegen_lower(cc);
}
bool
jit_pass_codegen(JitCompContext *cc)
{
if (!jit_annl_enable_jitted_addr(cc))
return false;
return jit_codegen_gen_native(cc);
}


@ -0,0 +1,84 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODEGEN_H_
#define _JIT_CODEGEN_H_
#include "bh_platform.h"
#include "jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Initialize the codegen module, e.g. the instruction encoder.
*
* @return true if succeeded; false if failed.
*/
bool
jit_codegen_init();
/**
 * Destroy the codegen module, e.g. the instruction encoder.
*/
void
jit_codegen_destroy();
/**
* Get hard register information of each kind.
*
* @return the JitHardRegInfo array of each kind
*/
const JitHardRegInfo *
jit_codegen_get_hreg_info();
/**
* Get hard register by name.
*
* @param name the name of the hard register
*
* @return the hard register of the name
*/
JitReg
jit_codegen_get_hreg_by_name(const char *name);
/**
* Generate native code for the given compilation context
*
* @param cc the compilation context that is ready to do codegen
*
* @return true if succeeds, false otherwise
*/
bool
jit_codegen_gen_native(JitCompContext *cc);
/**
 * Lower unsupported operations into supported ones for the target.
*
* @param cc the compilation context that is ready to do codegen
*
* @return true if succeeds, false otherwise
*/
bool
jit_codegen_lower(JitCompContext *cc);
/**
* Dump native code in the given range to assembly.
*
* @param begin_addr begin address of the native code
* @param end_addr end address of the native code
*/
void
jit_codegen_dump_native(void *begin_addr, void *end_addr);
int
jit_codegen_interp_jitted_glue(void *self, JitInterpSwitchInfo *info, void *pc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_CODEGEN_H_ */


@ -0,0 +1,176 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_ir.h"
#include "jit_codegen.h"
#include "jit_codecache.h"
#include "../interpreter/wasm.h"
typedef struct JitCompilerPass {
/* Name of the pass. */
const char *name;
/* The entry of the compiler pass. */
bool (*run)(JitCompContext *cc);
} JitCompilerPass;
/* clang-format off */
static JitCompilerPass compiler_passes[] = {
{ NULL, NULL },
#define REG_PASS(name) { #name, jit_pass_##name }
REG_PASS(dump),
REG_PASS(update_cfg),
REG_PASS(frontend),
REG_PASS(lower_cg),
REG_PASS(regalloc),
REG_PASS(codegen),
REG_PASS(register_jitted_code)
#undef REG_PASS
};
/* Number of compiler passes. */
#define COMPILER_PASS_NUM (sizeof(compiler_passes) / sizeof(compiler_passes[0]))
#if WASM_ENABLE_FAST_JIT_DUMP == 0
static const uint8 compiler_passes_without_dump[] = {
3, 4, 5, 6, 7, 0
};
#else
static const uint8 compiler_passes_with_dump[] = {
3, 2, 1, 4, 1, 5, 1, 6, 1, 7, 0
};
#endif
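/* The pass numbers above are indexes into compiler_passes[]: 1 = dump,
   2 = update_cfg, 3 = frontend, 4 = lower_cg, 5 = regalloc, 6 = codegen,
   7 = register_jitted_code; 0 terminates the sequence. */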
/* The exported global data of JIT compiler. */
static JitGlobals jit_globals = {
#if WASM_ENABLE_FAST_JIT_DUMP == 0
.passes = compiler_passes_without_dump,
#else
.passes = compiler_passes_with_dump,
#endif
.return_to_interp_from_jitted = NULL
};
/* clang-format on */
static bool
apply_compiler_passes(JitCompContext *cc)
{
const uint8 *p = jit_globals.passes;
for (; *p; p++) {
/* Set the pass NO. */
cc->cur_pass_no = p - jit_globals.passes;
bh_assert(*p < COMPILER_PASS_NUM);
if (!compiler_passes[*p].run(cc)) {
LOG_VERBOSE("JIT: compilation failed at pass[%td] = %s\n",
p - jit_globals.passes, compiler_passes[*p].name);
return false;
}
}
return true;
}
bool
jit_compiler_init(const JitCompOptions *options)
{
uint32 code_cache_size = options->code_cache_size > 0
? options->code_cache_size
: FAST_JIT_DEFAULT_CODE_CACHE_SIZE;
LOG_VERBOSE("JIT: compiler init with code cache size: %u\n",
code_cache_size);
if (!jit_code_cache_init(code_cache_size))
return false;
if (!jit_codegen_init())
goto fail1;
return true;
fail1:
jit_code_cache_destroy();
return false;
}
void
jit_compiler_destroy()
{
jit_codegen_destroy();
jit_code_cache_destroy();
}
JitGlobals *
jit_compiler_get_jit_globals()
{
return &jit_globals;
}
const char *
jit_compiler_get_pass_name(unsigned i)
{
return i < COMPILER_PASS_NUM ? compiler_passes[i].name : NULL;
}
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx)
{
JitCompContext *cc;
char *last_error;
bool ret = true;
/* Initialize compilation context. */
if (!(cc = jit_calloc(sizeof(*cc))))
return false;
if (!jit_cc_init(cc, 64)) {
jit_free(cc);
return false;
}
cc->cur_wasm_module = module;
cc->cur_wasm_func =
module->functions[func_idx - module->import_function_count];
cc->cur_wasm_func_idx = func_idx;
cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
&& !cc->cur_wasm_func->has_op_func_call)
|| (!module->possible_memory_grow);
/* Apply compiler passes. */
if (!apply_compiler_passes(cc) || jit_get_last_error(cc)) {
last_error = jit_get_last_error(cc);
os_printf("fast jit compilation failed: %s\n",
last_error ? last_error : "unknown error");
ret = false;
}
/* Delete the compilation context. */
jit_cc_delete(cc);
return ret;
}
bool
jit_compiler_compile_all(WASMModule *module)
{
uint32 i;
for (i = 0; i < module->function_count; i++) {
if (!jit_compiler_compile(module, module->import_function_count + i)) {
return false;
}
}
return true;
}
int
jit_interp_switch_to_jitted(void *exec_env, JitInterpSwitchInfo *info, void *pc)
{
return jit_codegen_interp_jitted_glue(exec_env, info, pc);
}


@ -0,0 +1,143 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_COMPILER_H_
#define _JIT_COMPILER_H_
#include "bh_platform.h"
#include "../interpreter/wasm_runtime.h"
#include "jit_ir.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct JitGlobals {
/* Compiler pass sequence, the last element must be 0 */
const uint8 *passes;
char *return_to_interp_from_jitted;
} JitGlobals;
/**
* Actions the interpreter should do when jitted code returns to
* interpreter.
*/
typedef enum JitInterpAction {
JIT_INTERP_ACTION_NORMAL, /* normal execution */
JIT_INTERP_ACTION_THROWN, /* exception was thrown */
JIT_INTERP_ACTION_CALL /* call wasm function */
} JitInterpAction;
/**
* Information exchanged between jitted code and interpreter.
*/
typedef struct JitInterpSwitchInfo {
/* Points to the frame that is passed to jitted code and the frame
that is returned from jitted code */
void *frame;
/* Output values from jitted code of different actions */
union {
/* IP and SP offsets for NORMAL */
struct {
int32 ip;
int32 sp;
} normal;
/* Function called from jitted code for CALL */
struct {
void *function;
} call;
/* Returned integer and/or floating point values for RETURN. This
is also used to pass return values from interpreter to jitted
code if the caller is in jitted code and the callee is in
interpreter. */
struct {
uint32 ival[2];
uint32 fval[2];
uint32 last_return_type;
} ret;
} out;
} JitInterpSwitchInfo;
/* Jit compiler options */
typedef struct JitCompOptions {
uint32 code_cache_size;
uint32 opt_level;
} JitCompOptions;
bool
jit_compiler_init(const JitCompOptions *option);
void
jit_compiler_destroy();
JitGlobals *
jit_compiler_get_jit_globals();
const char *
jit_compiler_get_pass_name(unsigned i);
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx);
bool
jit_compiler_compile_all(WASMModule *module);
int
jit_interp_switch_to_jitted(void *self, JitInterpSwitchInfo *info, void *pc);
/*
* Pass declarations:
*/
/**
* Dump the compilation context.
*/
bool
jit_pass_dump(JitCompContext *cc);
/**
* Update CFG (usually before dump for better readability).
*/
bool
jit_pass_update_cfg(JitCompContext *cc);
/**
 * Translate the function's Wasm bytecode into the JIT IR.
*/
bool
jit_pass_frontend(JitCompContext *cc);
/**
* Lower unsupported operations into supported ones.
*/
bool
jit_pass_lower_cg(JitCompContext *cc);
/**
* Register allocation.
*/
bool
jit_pass_regalloc(JitCompContext *cc);
/**
* Native code generation.
*/
bool
jit_pass_codegen(JitCompContext *cc);
/**
* Register the jitted code so that it can be executed.
*/
bool
jit_pass_register_jitted_code(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_COMPILER_H_ */
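A minimal sketch of how the driver API above fits together, assuming an
already-loaded WASMModule; fast_jit_compile_module() is a hypothetical
wrapper, and a real runtime keeps the compiler initialized for as long as
jitted code may run.

#include "jit_compiler.h"

static bool
fast_jit_compile_module(WASMModule *module)
{
    JitCompOptions options = { 0 };
    bool ret;

    /* 0 falls back to FAST_JIT_DEFAULT_CODE_CACHE_SIZE in jit_compiler_init */
    options.code_cache_size = 0;

    if (!jit_compiler_init(&options))
        return false;

    ret = jit_compiler_compile_all(module); /* run the pass list on every function */

    jit_compiler_destroy();
    return ret;
}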


@ -0,0 +1,331 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_dump.h"
#include "jit_compiler.h"
#include "jit_codegen.h"
void
jit_dump_reg(JitCompContext *cc, JitReg reg)
{
unsigned kind = jit_reg_kind(reg);
unsigned no = jit_reg_no(reg);
switch (kind) {
case JIT_REG_KIND_VOID:
os_printf("VOID");
break;
case JIT_REG_KIND_I32:
if (jit_reg_is_const(reg)) {
unsigned rel = jit_cc_get_const_I32_rel(cc, reg);
os_printf("0x%x", jit_cc_get_const_I32(cc, reg));
if (rel)
os_printf("(rel: 0x%x)", rel);
}
else
os_printf("i%d", no);
break;
case JIT_REG_KIND_I64:
if (jit_reg_is_const(reg))
os_printf("0x%llxL", jit_cc_get_const_I64(cc, reg));
else
os_printf("I%d", no);
break;
case JIT_REG_KIND_F32:
if (jit_reg_is_const(reg))
os_printf("%f", jit_cc_get_const_F32(cc, reg));
else
os_printf("f%d", no);
break;
case JIT_REG_KIND_F64:
if (jit_reg_is_const(reg))
os_printf("%fL", jit_cc_get_const_F64(cc, reg));
else
os_printf("D%d", no);
break;
case JIT_REG_KIND_L32:
os_printf("L%d", no);
break;
default:
bh_assert(!"Unsupported register kind.");
}
}
static void
jit_dump_insn_Reg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
unsigned i;
for (i = 0; i < opnd_num; i++) {
os_printf(i == 0 ? " " : ", ");
jit_dump_reg(cc, *(jit_insn_opnd(insn, i)));
}
os_printf("\n");
}
static void
jit_dump_insn_VReg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
unsigned i;
opnd_num = jit_insn_opndv_num(insn);
for (i = 0; i < opnd_num; i++) {
os_printf(i == 0 ? " " : ", ");
jit_dump_reg(cc, *(jit_insn_opndv(insn, i)));
}
os_printf("\n");
}
static void
jit_dump_insn_LookupSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
unsigned i;
JitOpndLookupSwitch *opnd = jit_insn_opndls(insn);
os_printf(" ");
jit_dump_reg(cc, opnd->value);
os_printf("\n%16s: ", "default");
jit_dump_reg(cc, opnd->default_target);
os_printf("\n");
for (i = 0; i < opnd->match_pairs_num; i++) {
os_printf("%18d: ", opnd->match_pairs[i].value);
jit_dump_reg(cc, opnd->match_pairs[i].target);
os_printf("\n");
}
}
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn)
{
switch (insn->opcode) {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) \
case JIT_OP_##NAME: \
os_printf(" %-15s", #NAME); \
jit_dump_insn_##OPND_KIND(cc, insn, OPND_NUM); \
break;
#include "jit_ir.def"
#undef INSN
}
}
void
jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block)
{
unsigned i, label_index;
void *begin_addr, *end_addr;
JitBasicBlock *block_next;
JitInsn *insn;
JitRegVec preds = jit_basic_block_preds(block);
JitRegVec succs = jit_basic_block_succs(block);
JitReg label = jit_basic_block_label(block), label_next;
JitReg *reg;
jit_dump_reg(cc, label);
os_printf(":\n ; PREDS(");
JIT_REG_VEC_FOREACH(preds, i, reg)
{
if (i > 0)
os_printf(" ");
jit_dump_reg(cc, *reg);
}
os_printf(")\n ;");
if (jit_annl_is_enabled_begin_bcip(cc))
os_printf(" BEGIN_BCIP=0x%04tx",
*(jit_annl_begin_bcip(cc, label))
- (uint8 *)cc->cur_wasm_module->load_addr);
if (jit_annl_is_enabled_end_bcip(cc))
os_printf(" END_BCIP=0x%04tx",
*(jit_annl_end_bcip(cc, label))
- (uint8 *)cc->cur_wasm_module->load_addr);
os_printf("\n");
if (jit_annl_is_enabled_jitted_addr(cc)) {
begin_addr = *(jit_annl_jitted_addr(cc, label));
if (label == cc->entry_label) {
block_next = cc->_ann._label_basic_block[2];
label_next = jit_basic_block_label(block_next);
end_addr = *(jit_annl_jitted_addr(cc, label_next));
}
else if (label == cc->exit_label) {
end_addr = cc->jitted_addr_end;
}
else {
label_index = jit_reg_no(label);
if (label_index < jit_cc_label_num(cc) - 1)
block_next = cc->_ann._label_basic_block[label_index + 1];
else
block_next = cc->_ann._label_basic_block[1];
label_next = jit_basic_block_label(block_next);
end_addr = *(jit_annl_jitted_addr(cc, label_next));
}
jit_codegen_dump_native(begin_addr, end_addr);
}
else {
/* Dump IR. */
JIT_FOREACH_INSN(block, insn) jit_dump_insn(cc, insn);
}
os_printf(" ; SUCCS(");
JIT_REG_VEC_FOREACH(succs, i, reg)
{
if (i > 0)
os_printf(" ");
jit_dump_reg(cc, *reg);
}
os_printf(")\n\n");
}
static void
dump_func_name(JitCompContext *cc)
{
const char *func_name = NULL;
WASMModule *module = cc->cur_wasm_module;
#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
func_name = cc->cur_wasm_func->field_name;
#endif
    /* if the custom name section is not generated,
       search the export table for the function name */
if (!func_name) {
uint32 i;
for (i = 0; i < module->export_count; i++) {
if (module->exports[i].kind == EXPORT_KIND_FUNC
&& module->exports[i].index == cc->cur_wasm_func_idx) {
func_name = module->exports[i].name;
break;
}
}
}
/* function name not exported, print number instead */
if (func_name == NULL) {
os_printf("$f%d", cc->cur_wasm_func_idx);
}
else {
os_printf("%s", func_name);
}
}
static void
dump_cc_ir(JitCompContext *cc)
{
unsigned i, end;
JitBasicBlock *block;
JitReg label;
const char *kind_names[] = { "VOID", "I32", "I64", "F32",
"F64", "V64", "V128", "V256" };
os_printf("; Function: ");
dump_func_name(cc);
os_printf("\n");
os_printf("; Constant table sizes:");
for (i = 0; i < JIT_REG_KIND_L32; i++)
os_printf(" %s=%d", kind_names[i], cc->_const_val._num[i]);
os_printf("\n; Label number: %d", jit_cc_label_num(cc));
os_printf("\n; Instruction number: %d", jit_cc_insn_num(cc));
os_printf("\n; Register numbers:");
for (i = 0; i < JIT_REG_KIND_L32; i++)
os_printf(" %s=%d", kind_names[i], jit_cc_reg_num(cc, i));
os_printf("\n; Label annotations:");
#define ANN_LABEL(TYPE, NAME) \
if (jit_annl_is_enabled_##NAME(cc)) \
os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_LABEL
os_printf("\n; Instruction annotations:");
#define ANN_INSN(TYPE, NAME) \
if (jit_anni_is_enabled_##NAME(cc)) \
os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_INSN
os_printf("\n; Register annotations:");
#define ANN_REG(TYPE, NAME) \
if (jit_annr_is_enabled_##NAME(cc)) \
os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_REG
os_printf("\n\n");
if (jit_annl_is_enabled_next_label(cc)) {
/* Blocks have been reordered, use that order to dump. */
for (label = cc->entry_label; label;
label = *(jit_annl_next_label(cc, label)))
jit_dump_basic_block(cc, *(jit_annl_basic_block(cc, label)));
}
else {
/* Otherwise, use the default order. */
jit_dump_basic_block(cc, jit_cc_entry_basic_block(cc));
JIT_FOREACH_BLOCK(cc, i, end, block) jit_dump_basic_block(cc, block);
jit_dump_basic_block(cc, jit_cc_exit_basic_block(cc));
}
}
void
jit_dump_cc(JitCompContext *cc)
{
if (jit_cc_label_num(cc) <= 2)
return;
dump_cc_ir(cc);
}
bool
jit_pass_dump(JitCompContext *cc)
{
const JitGlobals *jit_globals = jit_compiler_get_jit_globals();
const uint8 *passes = jit_globals->passes;
uint8 pass_no = cc->cur_pass_no;
const char *pass_name =
pass_no > 0 ? jit_compiler_get_pass_name(passes[pass_no - 1]) : "NULL";
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
if (!strcmp(pass_name, "lower_cg"))
/* Ignore lower codegen pass as it does nothing in x86-64 */
return true;
#endif
os_printf("JIT.COMPILER.DUMP: PASS_NO=%d PREV_PASS=%s\n\n", pass_no,
pass_name);
jit_dump_cc(cc);
os_printf("\n");
return true;
}
bool
jit_pass_update_cfg(JitCompContext *cc)
{
return jit_cc_update_cfg(cc);
}


@ -0,0 +1,54 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_DUMP_H_
#define _JIT_DUMP_H_
#include "jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Dump a register.
*
* @param cc compilation context of the register
* @param reg register to be dumped
*/
void
jit_dump_reg(JitCompContext *cc, JitReg reg);
/**
* Dump an instruction.
*
* @param cc compilation context of the instruction
* @param insn instruction to be dumped
*/
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn);
/**
 * Dump a basic block.
 *
 * @param cc compilation context of the basic block
 * @param block basic block to be dumped
 */
void
jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block);
/**
* Dump a compilation context.
*
* @param cc compilation context to be dumped
*/
void
jit_dump_cc(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_DUMP_H_ */

File diff suppressed because it is too large


@ -0,0 +1,521 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_FRONTEND_H_
#define _JIT_FRONTEND_H_
#include "jit_utils.h"
#include "jit_ir.h"
#include "../interpreter/wasm_interp.h"
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif
#if WASM_ENABLE_AOT == 0
typedef enum IntCond {
INT_EQZ = 0,
INT_EQ,
INT_NE,
INT_LT_S,
INT_LT_U,
INT_GT_S,
INT_GT_U,
INT_LE_S,
INT_LE_U,
INT_GE_S,
INT_GE_U
} IntCond;
typedef enum FloatCond {
FLOAT_EQ = 0,
FLOAT_NE,
FLOAT_LT,
FLOAT_GT,
FLOAT_LE,
FLOAT_GE,
FLOAT_UNO
} FloatCond;
#else
#define IntCond AOTIntCond
#define FloatCond AOTFloatCond
#endif
typedef enum IntArithmetic {
INT_ADD = 0,
INT_SUB,
INT_MUL,
INT_DIV_S,
INT_DIV_U,
INT_REM_S,
INT_REM_U
} IntArithmetic;
typedef enum V128Arithmetic {
V128_ADD = 0,
V128_SUB,
V128_MUL,
V128_DIV,
V128_NEG,
V128_MIN,
V128_MAX,
} V128Arithmetic;
typedef enum IntBitwise {
INT_AND = 0,
INT_OR,
INT_XOR,
} IntBitwise;
typedef enum V128Bitwise {
V128_NOT,
V128_AND,
V128_ANDNOT,
V128_OR,
V128_XOR,
V128_BITSELECT,
} V128Bitwise;
typedef enum IntShift {
INT_SHL = 0,
INT_SHR_S,
INT_SHR_U,
INT_ROTL,
INT_ROTR
} IntShift;
typedef enum FloatMath {
FLOAT_ABS = 0,
FLOAT_NEG,
FLOAT_CEIL,
FLOAT_FLOOR,
FLOAT_TRUNC,
FLOAT_NEAREST,
FLOAT_SQRT
} FloatMath;
typedef enum FloatArithmetic {
FLOAT_ADD = 0,
FLOAT_SUB,
FLOAT_MUL,
FLOAT_DIV,
FLOAT_MIN,
FLOAT_MAX,
} FloatArithmetic;
typedef enum JitExceptionID {
JIT_EXCE_UNREACHABLE = 0,
JIT_EXCE_OUT_OF_MEMORY,
JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
JIT_EXCE_INTEGER_OVERFLOW,
JIT_EXCE_INTEGER_DIVIDE_BY_ZERO,
JIT_EXCE_INVALID_CONVERSION_TO_INTEGER,
JIT_EXCE_INVALID_FUNCTION_TYPE_INDEX,
JIT_EXCE_INVALID_FUNCTION_INDEX,
JIT_EXCE_UNDEFINED_ELEMENT,
JIT_EXCE_UNINITIALIZED_ELEMENT,
JIT_EXCE_CALL_UNLINKED_IMPORT_FUNC,
JIT_EXCE_NATIVE_STACK_OVERFLOW,
JIT_EXCE_UNALIGNED_ATOMIC,
JIT_EXCE_AUX_STACK_OVERFLOW,
JIT_EXCE_AUX_STACK_UNDERFLOW,
JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
JIT_EXCE_OPERAND_STACK_OVERFLOW,
JIT_EXCE_ALREADY_THROWN,
JIT_EXCE_NUM,
} JitExceptionID;
/**
* Translate instructions in a function. The translated block must
 * end with a branch instruction whose targets are integral-constant
 * offsets relative to the end bcip of the translated block.
* If a target of a branch is really a constant value (which should be
* rare), put it into a register and then jump to the register instead
* of using the constant value directly in the target. In the
* translation process, don't create any new labels. The code bcip of
* the begin and end of the translated block is stored in the
* jit_annl_begin_bcip and jit_annl_end_bcip annotations of the label
* of the block, which must be the same as the bcips used in
* profiling.
*
* NOTE: the function must explicitly set SP to correct value when the
* entry's bcip is the function's entry address.
*
 * @param cc the compilation context that contains the generated IR
*
* @return IR block containing translated instructions if succeeds,
* NULL otherwise
*/
JitBasicBlock *
jit_frontend_translate_func(JitCompContext *cc);
/**
* Generate a block leaving the compiled code, which must store the
* target bcip and other necessary information for switching to
* interpreter or other compiled code and then jump to the exit of the
* cc.
*
* @param cc the compilation context
* @param bcip the target bytecode instruction pointer
* @param sp_offset stack pointer offset at the beginning of the block
*
* @return the leaving block if succeeds, NULL otherwise
*/
JitBlock *
jit_frontend_gen_leaving_block(JitCompContext *cc, void *bcip,
unsigned sp_offset);
/**
* Lower the IR of the given compilation context.
*
* @param cc the compilation context
*
* @return true if succeeds, false otherwise
*/
bool
jit_frontend_lower(JitCompContext *cc);
JitReg
get_module_inst_reg(JitFrame *frame);
JitReg
get_module_reg(JitFrame *frame);
JitReg
get_fast_jit_func_ptrs_reg(JitFrame *frame);
JitReg
get_global_data_reg(JitFrame *frame);
JitReg
get_aux_stack_bound_reg(JitFrame *frame);
JitReg
get_aux_stack_bottom_reg(JitFrame *frame);
JitReg
get_memories_reg(JitFrame *frame);
JitReg
get_memory_inst_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_memory_data_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_memory_data_end_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_1byte_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_2bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_4bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_8bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_16bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_tables_reg(JitFrame *frame);
JitReg
get_table_inst_reg(JitFrame *frame, uint32 table_idx);
JitReg
get_table_data_reg(JitFrame *frame, uint32 table_idx);
JitReg
get_table_cur_size_reg(JitFrame *frame, uint32 table_idx);
void
clear_fixed_virtual_regs(JitFrame *frame);
void
clear_memory_regs(JitFrame *frame);
void
clear_table_regs(JitFrame *frame);
/**
* Get the offset from frame pointer to the n-th local variable slot.
*
* @param n the index to the local variable array
*
* @return the offset from frame pointer to the local variable slot
*/
static inline unsigned
offset_of_local(unsigned n)
{
return offsetof(WASMInterpFrame, lp) + n * 4;
}
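/* Example: with 4-byte value slots, local slot 3 is located at
   offsetof(WASMInterpFrame, lp) + 12 bytes from the frame pointer. */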
/**
* Generate instruction to load an integer from the frame.
*
* This and the below gen_load_X functions generate instructions to
* load values from the frame into registers if the values have not
* been loaded yet.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i32(JitFrame *frame, unsigned n);
/**
 * Generate instruction to load an i64 integer from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i64(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a floating point value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f32(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a double value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f64(JitFrame *frame, unsigned n);
/**
* Generate instructions to commit computation result to the frame.
* The general principle is to only commit values that will be used
* through the frame.
*
* @param frame the frame information
* @param begin the begin value slot to commit
* @param end the end value slot to commit
*/
void
gen_commit_values(JitFrame *frame, JitValueSlot *begin, JitValueSlot *end);
/**
* Generate instructions to commit SP and IP pointers to the frame.
*
* @param frame the frame information
*/
void
gen_commit_sp_ip(JitFrame *frame);
/**
* Generate commit instructions for the block end.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_branch(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
}
/**
* Generate commit instructions for exception checks.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_exception(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->lp + frame->max_locals);
gen_commit_sp_ip(frame);
}
/**
* Generate commit instructions to commit all status.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_all(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
gen_commit_sp_ip(frame);
}
static inline void
clear_values(JitFrame *frame)
{
size_t total_size =
sizeof(JitValueSlot) * (frame->max_locals + frame->max_stacks);
memset(frame->lp, 0, total_size);
frame->committed_sp = NULL;
frame->committed_ip = NULL;
clear_fixed_virtual_regs(frame);
}
static inline void
push_i32(JitFrame *frame, JitReg value)
{
frame->sp->reg = value;
frame->sp->dirty = 1;
frame->sp++;
}
static inline void
push_i64(JitFrame *frame, JitReg value)
{
frame->sp->reg = value;
frame->sp->dirty = 1;
frame->sp++;
frame->sp->reg = value;
frame->sp->dirty = 1;
frame->sp++;
}
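/* i64/f64 values occupy two consecutive 32-bit slots; both slots record the
   same register so that pop_i64()/pop_f64() below can reload the value from
   the lower slot after moving sp back by two. */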
static inline void
push_f32(JitFrame *frame, JitReg value)
{
push_i32(frame, value);
}
static inline void
push_f64(JitFrame *frame, JitReg value)
{
push_i64(frame, value);
}
static inline JitReg
pop_i32(JitFrame *frame)
{
frame->sp--;
return gen_load_i32(frame, frame->sp - frame->lp);
}
static inline JitReg
pop_i64(JitFrame *frame)
{
frame->sp -= 2;
return gen_load_i64(frame, frame->sp - frame->lp);
}
static inline JitReg
pop_f32(JitFrame *frame)
{
frame->sp--;
return gen_load_f32(frame, frame->sp - frame->lp);
}
static inline JitReg
pop_f64(JitFrame *frame)
{
frame->sp -= 2;
return gen_load_f64(frame, frame->sp - frame->lp);
}
static inline void
pop(JitFrame *frame, int n)
{
frame->sp -= n;
memset(frame->sp, 0, n * sizeof(*frame->sp));
}
static inline JitReg
local_i32(JitFrame *frame, int n)
{
return gen_load_i32(frame, n);
}
static inline JitReg
local_i64(JitFrame *frame, int n)
{
return gen_load_i64(frame, n);
}
static inline JitReg
local_f32(JitFrame *frame, int n)
{
return gen_load_f32(frame, n);
}
static inline JitReg
local_f64(JitFrame *frame, int n)
{
return gen_load_f64(frame, n);
}
static inline void
set_local_i32(JitFrame *frame, int n, JitReg val)
{
frame->lp[n].reg = val;
frame->lp[n].dirty = 1;
}
static inline void
set_local_i64(JitFrame *frame, int n, JitReg val)
{
frame->lp[n].reg = val;
frame->lp[n].dirty = 1;
frame->lp[n + 1].reg = val;
frame->lp[n + 1].dirty = 1;
}
static inline void
set_local_f32(JitFrame *frame, int n, JitReg val)
{
set_local_i32(frame, n, val);
}
static inline void
set_local_f64(JitFrame *frame, int n, JitReg val)
{
set_local_i64(frame, n, val);
}
#define POP(jit_value, value_type) \
do { \
if (!jit_cc_pop_value(cc, value_type, &jit_value)) \
goto fail; \
} while (0)
#define POP_I32(v) POP(v, VALUE_TYPE_I32)
#define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32)
#define POP_F64(v) POP(v, VALUE_TYPE_F64)
#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
#define PUSH(jit_value, value_type) \
do { \
if (!jit_cc_push_value(cc, value_type, jit_value)) \
goto fail; \
} while (0)
#define PUSH_I32(v) PUSH(v, VALUE_TYPE_I32)
#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
#endif
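A minimal sketch of how a frontend handler typically uses the stack macros
above, assuming the declarations pulled in by this header are in scope; the
i32.add handler shown is illustrative and not the actual implementation in
the fe/ sources.

#include "jit_frontend.h"

static bool
example_compile_i32_add(JitCompContext *cc)
{
    JitReg lhs, rhs, res;

    POP_I32(rhs); /* operands are popped in reverse order */
    POP_I32(lhs);

    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(ADD, res, lhs, rhs); /* dst, src1, src2 */

    PUSH_I32(res);
    return true;
fail:
    return false;
}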

1403
core/iwasm/fast-jit/jit_ir.c Normal file

File diff suppressed because it is too large


@ -0,0 +1,302 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/**
 * @file jit_ir.def
*
* @brief Definition of JIT IR instructions and annotations.
*/
/**
* @def INSN (NAME, OPND_KIND, OPND_NUM, FIRST_USE)
*
* Definition of IR instructions
*
* @param NAME name of the opcode
* @param OPND_KIND kind of the operand(s)
* @param OPND_NUM number of the operand(s)
* @param FIRST_USE index of the first use register
*
* @p OPND_KIND and @p OPND_NUM together determine the format of an
* instruction. There are four kinds of formats:
*
* 1) Reg: fixed-number register operands, @p OPND_NUM specifies the
* number of operands;
*
* 2) VReg: variable-number register operands, @p OPND_NUM specifies
* the number of fixed register operands;
*
* 3) TableSwitch: tableswitch instruction's format, @p OPND_NUM must
* be 1;
*
* 4) LookupSwitch: lookupswitch instruction's format, @p OPND_NUM
* must be 1.
*
* Instruction operands are all registers and they are organized in an
* order that all registers defined by the instruction, if any, appear
* before the registers used by the instruction. The @p FIRST_USE is
* the index of the first use register in the register vector sorted
* in this order. Use @c jit_insn_opnd_regs to get the register
* vector in this order and use @c jit_insn_opnd_first_use to get the
* index of the first use register.
*
* Every instruction with name @p NAME has the following definitions:
*
 * @c JIT_OP_NAME: the enum opcode of insn NAME
* @c jit_insn_new_NAME (...): creates a new instance of insn NAME
*
* An instruction is deleted by function:
*
* @c jit_insn_delete (@p insn)
*
* In the scope of this IR's terminology, operand and argument have
* different meanings. The operand is a general notation, which
* denotes every raw operand of an instruction, while the argument
* only denotes the variable part of operands of instructions of VReg
* kind. For example, a VReg instruction phi node "r0 = phi(r1, r2)"
* has three operands opnd[0]: r0, opnd[1]: r1 and opnd[2]: r2, but
* only two arguments arg[0]: r1 and arg[1]: r2. Operands or
 * arguments of instructions with various formats can be accessed
* through the following APIs:
*
* @c jit_insn_opnd (@p insn, @p n): for Reg_N formats
* @c jit_insn_opndv (@p insn, @p n): for VReg_N formats
* @c jit_insn_opndv_num (@p insn): for VReg_N formats
* @c jit_insn_opndts (@p insn): for TableSwitch_1 format
* @c jit_insn_opndls (@p insn): for LookupSwitch_1 format
*/
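/*
 * Consumer example (illustrative): defining INSN before including this file
 * expands each entry into an enumerator, mirroring the JIT_OP_##NAME usage
 * in jit_dump.c; the enum tag below is a made-up name.
 *
 *   #define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) JIT_OP_##NAME,
 *   enum ExampleJitOpcode {
 *   #include "jit_ir.def"
 *   };
 *   #undef INSN
 */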
#ifndef INSN
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)
#endif
/* Move and conversion instructions that transfer values among
registers of the same kind (move) or different kinds (convert) */
INSN(MOV, Reg, 2, 1)
INSN(PHI, VReg, 1, 1)
/* Conversions that extend or truncate the value */
INSN(I8TOI32, Reg, 2, 1)
INSN(I8TOI64, Reg, 2, 1)
INSN(I16TOI32, Reg, 2, 1)
INSN(I16TOI64, Reg, 2, 1)
INSN(I32TOI8, Reg, 2, 1)
INSN(I32TOU8, Reg, 2, 1)
INSN(I32TOI16, Reg, 2, 1)
INSN(I32TOU16, Reg, 2, 1)
INSN(I32TOI64, Reg, 2, 1)
INSN(I32TOF32, Reg, 2, 1)
INSN(I32TOF64, Reg, 2, 1)
INSN(U32TOI64, Reg, 2, 1)
INSN(U32TOF32, Reg, 2, 1)
INSN(U32TOF64, Reg, 2, 1)
INSN(I64TOI8, Reg, 2, 1)
INSN(I64TOI16, Reg, 2, 1)
INSN(I64TOI32, Reg, 2, 1)
INSN(I64TOF32, Reg, 2, 1)
INSN(I64TOF64, Reg, 2, 1)
INSN(F32TOI32, Reg, 2, 1)
INSN(F32TOI64, Reg, 2, 1)
INSN(F32TOF64, Reg, 2, 1)
INSN(F32TOU32, Reg, 2, 1)
INSN(F64TOI32, Reg, 2, 1)
INSN(F64TOI64, Reg, 2, 1)
INSN(F64TOF32, Reg, 2, 1)
INSN(F64TOU32, Reg, 2, 1)
/**
 * Re-interpret binary representations:
* *(i32 *)&f32, *(i64 *)&f64, *(f32 *)&i32, *(f64 *)&i64
*/
INSN(I32CASTF32, Reg, 2, 1)
INSN(I64CASTF64, Reg, 2, 1)
INSN(F32CASTI32, Reg, 2, 1)
INSN(F64CASTI64, Reg, 2, 1)
/* Arithmetic and bitwise instructions: */
INSN(NEG, Reg, 2, 1)
INSN(NOT, Reg, 2, 1)
INSN(ADD, Reg, 3, 1)
INSN(SUB, Reg, 3, 1)
INSN(MUL, Reg, 3, 1)
INSN(DIV_S, Reg, 3, 1)
INSN(REM_S, Reg, 3, 1)
INSN(DIV_U, Reg, 3, 1)
INSN(REM_U, Reg, 3, 1)
INSN(SHL, Reg, 3, 1)
INSN(SHRS, Reg, 3, 1)
INSN(SHRU, Reg, 3, 1)
INSN(ROTL, Reg, 3, 1)
INSN(ROTR, Reg, 3, 1)
INSN(OR, Reg, 3, 1)
INSN(XOR, Reg, 3, 1)
INSN(AND, Reg, 3, 1)
INSN(CMP, Reg, 3, 1)
INSN(MAX, Reg, 3, 1)
INSN(MIN, Reg, 3, 1)
INSN(CLZ, Reg, 2, 1)
INSN(CTZ, Reg, 2, 1)
INSN(POPCNT, Reg, 2, 1)
/* Select instruction: */
INSN(SELECTEQ, Reg, 4, 1)
INSN(SELECTNE, Reg, 4, 1)
INSN(SELECTGTS, Reg, 4, 1)
INSN(SELECTGES, Reg, 4, 1)
INSN(SELECTLTS, Reg, 4, 1)
INSN(SELECTLES, Reg, 4, 1)
INSN(SELECTGTU, Reg, 4, 1)
INSN(SELECTGEU, Reg, 4, 1)
INSN(SELECTLTU, Reg, 4, 1)
INSN(SELECTLEU, Reg, 4, 1)
/* Memory access instructions: */
INSN(LDEXECENV, Reg, 1, 1)
INSN(LDJITINFO, Reg, 1, 1)
INSN(LDI8, Reg, 3, 1)
INSN(LDU8, Reg, 3, 1)
INSN(LDI16, Reg, 3, 1)
INSN(LDU16, Reg, 3, 1)
INSN(LDI32, Reg, 3, 1)
INSN(LDU32, Reg, 3, 1)
INSN(LDI64, Reg, 3, 1)
INSN(LDU64, Reg, 3, 1)
INSN(LDF32, Reg, 3, 1)
INSN(LDF64, Reg, 3, 1)
INSN(LDPTR, Reg, 3, 1)
INSN(LDV64, Reg, 3, 1)
INSN(LDV128, Reg, 3, 1)
INSN(LDV256, Reg, 3, 1)
INSN(STI8, Reg, 3, 0)
INSN(STI16, Reg, 3, 0)
INSN(STI32, Reg, 3, 0)
INSN(STI64, Reg, 3, 0)
INSN(STF32, Reg, 3, 0)
INSN(STF64, Reg, 3, 0)
INSN(STPTR, Reg, 3, 0)
INSN(STV64, Reg, 3, 1)
INSN(STV128, Reg, 3, 1)
INSN(STV256, Reg, 3, 1)
/* Control instructions */
INSN(JMP, Reg, 1, 0)
INSN(BEQ, Reg, 3, 0)
INSN(BNE, Reg, 3, 0)
INSN(BGTS, Reg, 3, 0)
INSN(BGES, Reg, 3, 0)
INSN(BLTS, Reg, 3, 0)
INSN(BLES, Reg, 3, 0)
INSN(BGTU, Reg, 3, 0)
INSN(BGEU, Reg, 3, 0)
INSN(BLTU, Reg, 3, 0)
INSN(BLEU, Reg, 3, 0)
INSN(LOOKUPSWITCH, LookupSwitch, 1, 0)
/* Call and return instructions */
INSN(CALLNATIVE, VReg, 2, 1)
INSN(CALLBC, Reg, 3, 2)
INSN(RETURNBC, Reg, 3, 0)
INSN(RETURN, Reg, 1, 0)
#undef INSN
/**
* @def ANN_LABEL (TYPE, NAME)
*
* Definition of label annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_annl_NAME (cc, label): accesses the annotation NAME of
* label @p label
* @c jit_annl_enable_NAME (cc): enables the annotation NAME
* @c jit_annl_disable_NAME (cc): disables the annotation NAME
* @c jit_annl_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_LABEL
#define ANN_LABEL(TYPE, NAME)
#endif
/* Basic Block of a label. */
ANN_LABEL(JitBasicBlock *, basic_block)
/* Number of predecessors of the block, which is only used by
   jit_cc_update_cfg when updating the CFG. */
ANN_LABEL(uint16, pred_num)
/* Execution frequency of a block. We can split critical edges with
empty blocks so we don't need to store frequencies of edges. */
ANN_LABEL(uint16, freq)
/* Begin bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, begin_bcip)
/* End bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, end_bcip)
/* Stack pointer offset at the end of the block. */
ANN_LABEL(uint16, end_sp)
/* The label of the next physically adjacent block. */
ANN_LABEL(JitReg, next_label)
/* Compiled code address of the block. */
ANN_LABEL(void *, jitted_addr)
#undef ANN_LABEL
/**
* @def ANN_INSN (TYPE, NAME)
*
* Definition of instruction annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_anni_NAME (cc, insn): accesses the annotation NAME of
* instruction @p insn
* @c jit_anni_enable_NAME (cc): enables the annotation NAME
* @c jit_anni_disable_NAME (cc): disables the annotation NAME
* @c jit_anni_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_INSN
#define ANN_INSN(TYPE, NAME)
#endif
/* A private annotation for linking instructions with the same hash
value, which is only used by the compilation context's hash table
of instructions. */
ANN_INSN(JitInsn *, _hash_link)
#undef ANN_INSN
/**
* @def ANN_REG (TYPE, NAME)
*
* Definition of register annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_annr_NAME (cc, reg): accesses the annotation NAME of
* register @p reg
* @c jit_annr_enable_NAME (cc): enables the annotation NAME
* @c jit_annr_disable_NAME (cc): disables the annotation NAME
* @c jit_annr_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_REG
#define ANN_REG(TYPE, NAME)
#endif
/* Defining instruction of registers satisfying SSA property. */
ANN_REG(JitInsn *, def_insn)
#undef ANN_REG

1874
core/iwasm/fast-jit/jit_ir.h Normal file

File diff suppressed because it is too large


@ -0,0 +1,840 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
#include "jit_compiler.h"
#if BH_DEBUG != 0
#define VREG_DEF_SANITIZER
#endif
/**
 * A stack of uint32 elements for storing the distances of occurrences of
 * virtual registers.
*/
typedef struct UintStack {
/* Capacity of the stack. */
uint32 capacity;
/* Top index of the stack. */
uint32 top;
    /* Elements of the stack. */
uint32 elem[1];
} UintStack;
static bool
uint_stack_push(UintStack **stack, unsigned val)
{
unsigned capacity = *stack ? (*stack)->capacity : 0;
unsigned top = *stack ? (*stack)->top : 0;
bh_assert(top <= capacity);
if (top == capacity) {
const unsigned elem_size = sizeof((*stack)->elem[0]);
unsigned new_capacity = capacity ? capacity + capacity / 2 : 4;
UintStack *new_stack =
jit_malloc(offsetof(UintStack, elem) + elem_size * new_capacity);
if (!new_stack)
return false;
new_stack->capacity = new_capacity;
new_stack->top = top;
if (*stack)
memcpy(new_stack->elem, (*stack)->elem, elem_size * top);
jit_free(*stack);
*stack = new_stack;
}
(*stack)->elem[(*stack)->top++] = val;
return true;
}
static int
uint_stack_top(UintStack *stack)
{
return stack->elem[stack->top - 1];
}
static void
uint_stack_delete(UintStack **stack)
{
jit_free(*stack);
*stack = NULL;
}
static void
uint_stack_pop(UintStack **stack)
{
bh_assert((*stack)->top > 0);
/**
     * TODO: an empty distances stack means no remaining instruction uses the
     * current JitReg; should we release the HardReg and clear the VirtualReg
     * information here?
*/
if (--(*stack)->top == 0)
uint_stack_delete(stack);
}
/**
* Information of a virtual register.
*/
typedef struct VirtualReg {
/* The hard register allocated to this virtual register. */
JitReg hreg;
/* The spill slot allocated to this virtual register. */
JitReg slot;
/* The hard register allocated to global virtual registers. It is 0
for local registers, whose lifetime is within one basic block. */
JitReg global_hreg;
/* Distances from the beginning of basic block of all occurrences of the
virtual register in the basic block. */
UintStack *distances;
} VirtualReg;
/**
* Information of a hard register.
*/
typedef struct HardReg {
/* The virtual register this hard register is allocated to. */
JitReg vreg;
} HardReg;
/**
* Information of a spill slot.
*/
typedef struct SpillSlot {
/* The virtual register this spill slot is allocated to. */
JitReg vreg;
} SpillSlot;
typedef struct RegallocContext {
/* The compiler context. */
JitCompContext *cc;
/* Information of virtual registers. The register allocation must
not increase the virtual register number during the allocation
process. */
VirtualReg *vregs[JIT_REG_KIND_L32];
/* Information of hard registers. */
HardReg *hregs[JIT_REG_KIND_L32];
/* Number of elements in the spill_slots array. */
uint32 spill_slot_num;
/* Information of spill slots. */
SpillSlot *spill_slots;
/* The last define-released hard register. */
JitReg last_def_released_hreg;
} RegallocContext;
/**
* Get the VirtualReg structure of the given virtual register.
*
* @param rc the regalloc context
* @param vreg the virtual register
*
* @return the VirtualReg structure of the given virtual register
*/
static VirtualReg *
rc_get_vr(RegallocContext *rc, JitReg vreg)
{
unsigned kind = jit_reg_kind(vreg);
unsigned no = jit_reg_no(vreg);
bh_assert(jit_reg_is_variable(vreg));
return &rc->vregs[kind][no];
}
/**
* Get the HardReg structure of the given hard register.
*
* @param rc the regalloc context
* @param hreg the hard register
*
* @return the HardReg structure of the given hard register
*/
static HardReg *
rc_get_hr(RegallocContext *rc, JitReg hreg)
{
unsigned kind = jit_reg_kind(hreg);
unsigned no = jit_reg_no(hreg);
bh_assert(jit_reg_is_variable(hreg) && jit_cc_is_hreg(rc->cc, hreg));
return &rc->hregs[kind][no];
}
/**
* Get the SpillSlot structure of the given slot.
*
* @param rc the regalloc context
* @param slot the constant register representing the slot index
*
* @return the SpillSlot of the given slot
*/
static SpillSlot *
rc_get_spill_slot(RegallocContext *rc, JitReg slot)
{
unsigned index = jit_cc_get_const_I32(rc->cc, slot);
bh_assert(index < rc->spill_slot_num);
return &rc->spill_slots[index];
}
/**
* Get the stride in the spill slots of the register.
*
* @param reg a virtual register
*
* @return stride in the spill slots
*/
static unsigned
get_reg_stride(JitReg reg)
{
static const uint8 strides[] = { 0, 1, 2, 1, 2, 2, 4, 8, 0 };
return strides[jit_reg_kind(reg)];
}
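/* With the register kinds ordered VOID, I32, I64, F32, F64, V64, V128, V256,
   the stride table above assigns one 4-byte spill slot to I32/F32, two to
   I64/F64/V64, four to V128 and eight to V256. */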
/**
* Allocate a spill slot for the given virtual register.
*
* @param rc the regalloc context
* @param vreg the virtual register
*
 * @return the spill slot encoded in a constant register
*/
static JitReg
rc_alloc_spill_slot(RegallocContext *rc, JitReg vreg)
{
const unsigned stride = get_reg_stride(vreg);
unsigned mask, new_num, i, j;
SpillSlot *slots;
bh_assert(stride > 0);
for (i = 0; i < rc->spill_slot_num; i += stride)
for (j = i;; j++) {
if (j == i + stride)
/* Found a free slot for vreg. */
goto found;
if (rc->spill_slots[j].vreg)
break;
}
/* No free slot, increase the slot number. */
mask = stride - 1;
/* Align the slot index. */
i = (rc->spill_slot_num + mask) & ~mask;
new_num = i == 0 ? 32 : i + i / 2;
if (!(slots = jit_calloc(sizeof(*slots) * new_num)))
return 0;
if (rc->spill_slots)
memcpy(slots, rc->spill_slots, sizeof(*slots) * rc->spill_slot_num);
jit_free(rc->spill_slots);
rc->spill_slots = slots;
rc->spill_slot_num = new_num;
found:
/* Now, i is the first slot for vreg. */
if ((i + stride) * 4 > rc->cc->spill_cache_size)
/* No frame space for the spill area. */
return 0;
/* Allocate the slot(s) to vreg. */
for (j = i; j < i + stride; j++)
rc->spill_slots[j].vreg = vreg;
return jit_cc_new_const_I32(rc->cc, i);
}
/**
* Free a spill slot.
*
* @param rc the regalloc context
* @param slot_reg the constant register representing the slot index
*/
static void
rc_free_spill_slot(RegallocContext *rc, JitReg slot_reg)
{
if (slot_reg) {
SpillSlot *slot = rc_get_spill_slot(rc, slot_reg);
const JitReg vreg = slot->vreg;
const unsigned stride = get_reg_stride(vreg);
unsigned i;
for (i = 0; i < stride; i++)
slot[i].vreg = 0;
}
}
static void
rc_destroy(RegallocContext *rc)
{
unsigned i, j;
for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
const unsigned vreg_num = jit_cc_reg_num(rc->cc, i);
if (rc->vregs[i])
for (j = 0; j < vreg_num; j++)
uint_stack_delete(&rc->vregs[i][j].distances);
jit_free(rc->vregs[i]);
jit_free(rc->hregs[i]);
}
jit_free(rc->spill_slots);
}
static bool
rc_init(RegallocContext *rc, JitCompContext *cc)
{
unsigned i, j;
memset(rc, 0, sizeof(*rc));
rc->cc = cc;
for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
const unsigned vreg_num = jit_cc_reg_num(cc, i);
const unsigned hreg_num = jit_cc_hreg_num(cc, i);
if (vreg_num > 0
&& !(rc->vregs[i] = jit_calloc(sizeof(VirtualReg) * vreg_num)))
goto fail;
if (hreg_num > 0
&& !(rc->hregs[i] = jit_calloc(sizeof(HardReg) * hreg_num)))
goto fail;
/* Hard registers can only be allocated to themselves. */
for (j = 0; j < hreg_num; j++)
rc->vregs[i][j].global_hreg = jit_reg_new(i, j);
}
return true;
fail:
rc_destroy(rc);
return false;
}
/**
 * Check whether the given register is an allocation candidate, which
 * must be a variable register that is not a fixed hard register.
*
* @param cc the compilation context
* @param reg the register
*
* @return true if the register is an allocation candidate
*/
static bool
is_alloc_candidate(JitCompContext *cc, JitReg reg)
{
return (jit_reg_is_variable(reg)
&& (!jit_cc_is_hreg(cc, reg) || !jit_cc_is_hreg_fixed(cc, reg)));
}
#ifdef VREG_DEF_SANITIZER
static void
check_vreg_definition(RegallocContext *rc, JitInsn *insn)
{
JitRegVec regvec = jit_insn_opnd_regs(insn);
JitReg *regp, reg_defined = 0;
unsigned i, first_use = jit_insn_opnd_first_use(insn);
    /* check if there is a definition of a vr before its references */
JIT_REG_VEC_FOREACH(regvec, i, regp)
{
VirtualReg *vr = NULL;
if (!is_alloc_candidate(rc->cc, *regp))
continue;
/* a strong assumption that there is only one defined reg */
if (i < first_use) {
reg_defined = *regp;
continue;
}
/**
* both definition and references are in one instruction,
* like MOV i3, i3
*/
if (reg_defined == *regp)
continue;
vr = rc_get_vr(rc, *regp);
bh_assert(vr->distances);
}
}
#endif
/**
 * Collect the distances from the beginning of the basic block of all
 * occurrences of each virtual register.
*
* @param rc the regalloc context
* @param basic_block the basic block
*
* @return distance of the end instruction if succeeds, -1 otherwise
*/
static int
collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
{
JitInsn *insn;
int distance = 1;
JIT_FOREACH_INSN(basic_block, insn)
{
JitRegVec regvec = jit_insn_opnd_regs(insn);
unsigned i;
JitReg *regp;
#ifdef VREG_DEF_SANITIZER
check_vreg_definition(rc, insn);
#endif
/* NOTE: the distance may be pushed more than once if the
virtual register occurs multiple times in the
instruction. */
JIT_REG_VEC_FOREACH(regvec, i, regp)
if (is_alloc_candidate(rc->cc, *regp))
if (!uint_stack_push(&(rc_get_vr(rc, *regp))->distances, distance))
return -1;
        /* Integer overflow check. Normally it won't happen, but
           we'd better add the check here */
if (distance >= INT32_MAX)
return -1;
distance++;
}
return distance;
}
static JitReg
offset_of_spill_slot(JitCompContext *cc, JitReg slot)
{
return jit_cc_new_const_I32(cc, cc->spill_cache_offset
+ jit_cc_get_const_I32(cc, slot) * 4);
}
/**
 * Reload the virtual register from memory. The reload instruction will
 * be inserted after the given instruction.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register to be reloaded
 * @param cur_insn the current instruction after which the reload
 * instruction will be inserted
*
* @return the reload instruction if succeeds, NULL otherwise
*/
static JitInsn *
reload_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
VirtualReg *vr = rc_get_vr(rc, vreg);
HardReg *hr = rc_get_hr(rc, vr->hreg);
JitInsn *insn = NULL;
if (vreg == rc->cc->exec_env_reg)
/* Reload exec_env_reg with LDEXECENV. */
insn = jit_cc_new_insn(rc->cc, LDEXECENV, vr->hreg);
else
/* Allocate spill slot if not yet and reload from there. */
{
JitReg fp_reg = rc->cc->fp_reg, offset;
if (!vr->slot && !(vr->slot = rc_alloc_spill_slot(rc, vreg)))
            /* Cannot allocate a spill slot (due to OOM or the frame
               size limit). */
return NULL;
offset = offset_of_spill_slot(rc->cc, vr->slot);
switch (jit_reg_kind(vreg)) {
case JIT_REG_KIND_I32:
insn = jit_cc_new_insn(rc->cc, LDI32, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_I64:
insn = jit_cc_new_insn(rc->cc, LDI64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_F32:
insn = jit_cc_new_insn(rc->cc, LDF32, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_F64:
insn = jit_cc_new_insn(rc->cc, LDF64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V64:
insn = jit_cc_new_insn(rc->cc, LDV64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V128:
insn =
jit_cc_new_insn(rc->cc, LDV128, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V256:
insn =
jit_cc_new_insn(rc->cc, LDV256, vr->hreg, fp_reg, offset);
break;
default:
bh_assert(0);
}
}
if (insn)
jit_insn_insert_after(cur_insn, insn);
bh_assert(hr->vreg == vreg);
hr->vreg = vr->hreg = 0;
return insn;
}
/**
 * Spill the virtual register (which cannot be exec_env_reg) to memory.
 * The spill instruction will be inserted after the given instruction.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register to be spilled
 * @param cur_insn the current instruction after which the spill
 * instruction will be inserted
*
* @return the spill instruction if succeeds, NULL otherwise
*/
static JitInsn *
spill_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
VirtualReg *vr = rc_get_vr(rc, vreg);
JitReg fp_reg = rc->cc->fp_reg, offset;
JitInsn *insn;
/* There is no chance to spill exec_env_reg. */
bh_assert(vreg != rc->cc->exec_env_reg);
bh_assert(vr->hreg && vr->slot);
offset = offset_of_spill_slot(rc->cc, vr->slot);
switch (jit_reg_kind(vreg)) {
case JIT_REG_KIND_I32:
insn = jit_cc_new_insn(rc->cc, STI32, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_I64:
insn = jit_cc_new_insn(rc->cc, STI64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_F32:
insn = jit_cc_new_insn(rc->cc, STF32, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_F64:
insn = jit_cc_new_insn(rc->cc, STF64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V64:
insn = jit_cc_new_insn(rc->cc, STV64, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V128:
insn = jit_cc_new_insn(rc->cc, STV128, vr->hreg, fp_reg, offset);
break;
case JIT_REG_KIND_V256:
insn = jit_cc_new_insn(rc->cc, STV256, vr->hreg, fp_reg, offset);
break;
default:
bh_assert(0);
return NULL;
}
if (insn)
jit_insn_insert_after(cur_insn, insn);
return insn;
}
/**
 * Allocate a hard register for the virtual register. Necessary
 * reload instructions will be inserted after the given instruction.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register
 * @param insn the instruction after which the reload instructions will
 * be inserted
* @param distance the distance of the current instruction
*
* @return the hard register allocated if succeeds, 0 otherwise
*/
static JitReg
allocate_hreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
const int kind = jit_reg_kind(vreg);
const HardReg *hregs = rc->hregs[kind];
const unsigned hreg_num = jit_cc_hreg_num(rc->cc, kind);
JitReg hreg, vreg_to_reload = 0;
int min_distance = distance, vr_distance;
VirtualReg *vr = rc_get_vr(rc, vreg);
unsigned i;
if (hreg_num == 0)
/* Unsupported hard register kind. */
{
jit_set_last_error(rc->cc, "unsupported hard register kind");
return 0;
}
if (vr->global_hreg)
        /* It has a globally allocated register; we can only use it. */
{
if ((vreg_to_reload = (rc_get_hr(rc, vr->global_hreg))->vreg))
if (!reload_vreg(rc, vreg_to_reload, insn))
return 0;
return vr->global_hreg;
}
/* Use the last define-released register if its kind is correct and
it's free so as to optimize for two-operand instructions. */
if (jit_reg_kind(rc->last_def_released_hreg) == kind
&& (rc_get_hr(rc, rc->last_def_released_hreg))->vreg == 0)
return rc->last_def_released_hreg;
/* No hint given, just try to pick any free register. */
for (i = 0; i < hreg_num; i++) {
hreg = jit_reg_new(kind, i);
if (jit_cc_is_hreg_fixed(rc->cc, hreg))
continue;
if (hregs[i].vreg == 0)
/* Found a free one, return it. */
return hreg;
}
/* No free registers, need to spill and reload one. */
for (i = 0; i < hreg_num; i++) {
if (jit_cc_is_hreg_fixed(rc->cc, jit_reg_new(kind, i)))
continue;
vr = rc_get_vr(rc, hregs[i].vreg);
/* TODO: since the hregs[i] is in use, its distances should be valid */
vr_distance = vr->distances ? uint_stack_top(vr->distances) : 0;
if (vr_distance < min_distance) {
min_distance = vr_distance;
vreg_to_reload = hregs[i].vreg;
hreg = jit_reg_new(kind, i);
}
}
bh_assert(min_distance < distance);
if (!reload_vreg(rc, vreg_to_reload, insn))
return 0;
return hreg;
}
/**
 * Allocate a hard register for the virtual register if not allocated
 * yet. Necessary spill and reload instructions will be inserted
 * after the given instruction. This operation will convert the
 * virtual register's state from 1 or 3 to 2.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register
 * @param insn the instruction after which the spill and reload
 * instructions will be inserted
* @param distance the distance of the current instruction
*
* @return the hard register allocated to the virtual register if
* succeeds, 0 otherwise
*/
static JitReg
allocate_for_vreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
VirtualReg *vr = rc_get_vr(rc, vreg);
if (vr->hreg)
/* It has had a hard register, reuse it. */
return vr->hreg;
/* Not allocated yet. */
if ((vr->hreg = allocate_hreg(rc, vreg, insn, distance)))
(rc_get_hr(rc, vr->hreg))->vreg = vreg;
return vr->hreg;
}
/**
* Clobber live registers.
*
* @param rc the regalloc context
* @param is_native whether it's native ABI or JITed ABI
* @param insn the instruction after which the reload insertion will
* be inserted
*
* @return true if succeeds, false otherwise
*/
static bool
clobber_live_regs(RegallocContext *rc, bool is_native, JitInsn *insn)
{
unsigned i, j;
for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
const unsigned hreg_num = jit_cc_hreg_num(rc->cc, i);
for (j = 0; j < hreg_num; j++) {
JitReg hreg = jit_reg_new(i, j);
bool caller_saved =
(is_native ? jit_cc_is_hreg_caller_saved_native(rc->cc, hreg)
: jit_cc_is_hreg_caller_saved_jitted(rc->cc, hreg));
if (caller_saved && rc->hregs[i][j].vreg)
if (!reload_vreg(rc, rc->hregs[i][j].vreg, insn))
return false;
}
}
return true;
}
/**
* Do local register allocation for the given basic block
*
* @param rc the regalloc context
* @param basic_block the basic block
* @param distance the distance of the last instruction of the basic block
*
* @return true if succeeds, false otherwise
*/
static bool
allocate_for_basic_block(RegallocContext *rc, JitBasicBlock *basic_block,
int distance)
{
JitInsn *insn;
JIT_FOREACH_INSN_REVERSE(basic_block, insn)
{
JitRegVec regvec = jit_insn_opnd_regs(insn);
unsigned first_use = jit_insn_opnd_first_use(insn);
unsigned i;
JitReg *regp;
distance--;
JIT_REG_VEC_FOREACH_DEF(regvec, i, regp, first_use)
if (is_alloc_candidate(rc->cc, *regp)) {
const JitReg vreg = *regp;
VirtualReg *vr = rc_get_vr(rc, vreg);
if (!(*regp = allocate_for_vreg(rc, vreg, insn, distance)))
return false;
/* Spill the register if required. */
if (vr->slot && !spill_vreg(rc, vreg, insn))
return false;
bh_assert(uint_stack_top(vr->distances) == distance);
uint_stack_pop(&vr->distances);
/* Record the define-released hard register. */
rc->last_def_released_hreg = vr->hreg;
/* Release the hreg and spill slot. */
rc_free_spill_slot(rc, vr->slot);
(rc_get_hr(rc, vr->hreg))->vreg = 0;
vr->hreg = vr->slot = 0;
}
if (insn->opcode == JIT_OP_CALLBC) {
if (!clobber_live_regs(rc, false, insn))
return false;
/* The exec_env_reg is implicitly used by the callee. */
if (!allocate_for_vreg(rc, rc->cc->exec_env_reg, insn, distance))
return false;
}
else if (insn->opcode == JIT_OP_CALLNATIVE) {
if (!clobber_live_regs(rc, true, insn))
return false;
}
JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
if (is_alloc_candidate(rc->cc, *regp)) {
if (!allocate_for_vreg(rc, *regp, insn, distance))
return false;
}
JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
if (is_alloc_candidate(rc->cc, *regp)) {
VirtualReg *vr = rc_get_vr(rc, *regp);
bh_assert(uint_stack_top(vr->distances) == distance);
uint_stack_pop(&vr->distances);
/* be sure that the hreg exists and hasn't been spilled out */
bh_assert(vr->hreg != 0);
*regp = vr->hreg;
}
}
return true;
}
bool
jit_pass_regalloc(JitCompContext *cc)
{
RegallocContext rc = { 0 };
unsigned label_index, end_label_index;
JitBasicBlock *basic_block;
VirtualReg *self_vr;
bool retval = false;
if (!rc_init(&rc, cc))
return false;
/* NOTE: don't allocate new virtual registers during allocation
because the rc->vregs array is fixed size. */
/* TODO: allocate hard registers for global virtual registers here.
Currently, exec_env_reg is the only global virtual register. */
self_vr = rc_get_vr(&rc, cc->exec_env_reg);
JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, label_index, end_label_index, basic_block)
{
int distance;
/* TODO: initialize hreg for live-out registers. */
self_vr->hreg = self_vr->global_hreg;
(rc_get_hr(&rc, cc->exec_env_reg))->vreg = cc->exec_env_reg;
/**
* TODO: the allocation of a basic block keeps using vregs[]
* and hregs[] from previous basic block
*/
if ((distance = collect_distances(&rc, basic_block)) < 0)
goto cleanup_and_return;
if (!allocate_for_basic_block(&rc, basic_block, distance))
goto cleanup_and_return;
/* TODO: generate necessary spills for live-in registers. */
}
retval = true;
cleanup_and_return:
rc_destroy(&rc);
return retval;
}

View File

@ -0,0 +1,19 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum)
{
JitBitmap *bitmap;
if ((bitmap = jit_calloc(offsetof(JitBitmap, map) + (bitnum + 7) / 8))) {
bitmap->begin_index = begin_index;
bitmap->end_index = begin_index + bitnum;
}
return bitmap;
}

View File

@ -0,0 +1,136 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_UTILS_H_
#define _JIT_UTILS_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* A simple fixed size bitmap.
*/
typedef struct JitBitmap {
/* The first valid bit index. */
uintptr_t begin_index;
/* The last valid bit index plus one. */
uintptr_t end_index;
/* The bitmap. */
uint8 map[1];
} JitBitmap;
static inline void *
jit_malloc(unsigned int size)
{
return wasm_runtime_malloc(size);
}
static inline void *
jit_calloc(unsigned int size)
{
void *ret = wasm_runtime_malloc(size);
if (ret) {
memset(ret, 0, size);
}
return ret;
}
static inline void
jit_free(void *ptr)
{
if (ptr)
wasm_runtime_free(ptr);
}
/**
* Create a new bitmap.
*
* @param begin_index the first valid bit index
* @param bitnum maximal bit number of the bitmap.
*
* @return the new bitmap if succeeds, NULL otherwise.
*/
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum);
/**
* Delete a bitmap.
*
* @param bitmap the bitmap to be deleted
*/
static inline void
jit_bitmap_delete(JitBitmap *bitmap)
{
jit_free(bitmap);
}
/**
* Check whether the given index is in the range of the bitmap.
*
* @param bitmap the bitmap
* @param n the bit index
*
* @return true if the index is in range, false otherwise
*/
static inline bool
jit_bitmap_is_in_range(JitBitmap *bitmap, unsigned n)
{
return n >= bitmap->begin_index && n < bitmap->end_index;
}
/**
 * Get a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be read
*
* @return value of the bit
*/
static inline int
jit_bitmap_get_bit(JitBitmap *bitmap, unsigned n)
{
unsigned idx = n - bitmap->begin_index;
bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
return (bitmap->map[idx / 8] >> (idx % 8)) & 1;
}
/**
* Set a bit in the bitmap.
*
* @param bitmap the bitmap
* @param n the n-th bit to be set
*/
static inline void
jit_bitmap_set_bit(JitBitmap *bitmap, unsigned n)
{
unsigned idx = n - bitmap->begin_index;
bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
bitmap->map[idx / 8] |= 1 << (idx % 8);
}
/**
* Clear a bit in the bitmap.
*
* @param bitmap the bitmap
* @param n the n-th bit to be cleared
*/
static inline void
jit_bitmap_clear_bit(JitBitmap *bitmap, unsigned n)
{
unsigned idx = n - bitmap->begin_index;
bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
bitmap->map[idx / 8] &= ~(1 << (idx % 8));
}
#ifdef __cplusplus
}
#endif
#endif
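/*
 * Editorial usage sketch (not part of the patch): a minimal example of the
 * JitBitmap API declared above, assuming the runtime allocator behind
 * jit_calloc()/jit_free() has been initialized.
 */
#include "jit_utils.h"

static void
jit_bitmap_usage_sketch(void)
{
    /* Track bits 0..63; begin_index could also be non-zero, e.g. the
       first register number of a given kind. */
    JitBitmap *bitmap = jit_bitmap_new(0, 64);

    if (!bitmap)
        return;

    jit_bitmap_set_bit(bitmap, 3);
    jit_bitmap_set_bit(bitmap, 17);

    if (jit_bitmap_get_bit(bitmap, 17))
        jit_bitmap_clear_bit(bitmap, 17);

    /* Indices must be range-checked before using the unchecked accessors. */
    if (jit_bitmap_is_in_range(bitmap, 100))
        jit_bitmap_set_bit(bitmap, 100);

    jit_bitmap_delete(bitmap);
}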

View File

@ -139,6 +139,9 @@ typedef struct RuntimeInitArgs {
char ip_addr[128];
int platform_port;
int instance_port;
/* Fast JIT code cache size */
uint32_t fast_jit_code_cache_size;
} RuntimeInitArgs;
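/*
 * Editorial usage sketch (not part of the patch): how the new
 * fast_jit_code_cache_size field might be filled in before initializing the
 * runtime. The Alloc_With_Pool path and wasm_runtime_full_init() are the
 * existing WAMR init API; the 4 MB figure is arbitrary, and treating 0 as
 * "use the built-in default" is an assumption.
 */
#include <string.h>

static char global_heap_buf[512 * 1024];

static bool
init_runtime_with_fast_jit_cache(void)
{
    RuntimeInitArgs init_args;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = global_heap_buf;
    init_args.mem_alloc_option.pool.heap_size = sizeof(global_heap_buf);
    /* Reserve 4 MB for the Fast JIT code cache. */
    init_args.fast_jit_code_cache_size = 4 * 1024 * 1024;

    return wasm_runtime_full_init(&init_args);
}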
#ifndef WASM_VALKIND_T_DEFINED
@ -482,6 +485,23 @@ wasm_runtime_create_exec_env(wasm_module_inst_t module_inst,
WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env);
/**
* Get the singleton execution environment for the instance.
*
* Note: The singleton execution environment is the execution
* environment used internally by the runtime for the API functions
 * like wasm_application_execute_main, which don't take an explicit
 * execution environment. It's associated with the corresponding
 * module instance and managed by the runtime. The API user should
 * not destroy it with wasm_runtime_destroy_exec_env.
 *
 * @param module_inst the module instance
 *
 * @return the singleton execution environment of the module instance
*/
WASM_RUNTIME_API_EXTERN wasm_exec_env_t
wasm_runtime_get_exec_env_singleton(wasm_module_inst_t module_inst);
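/*
 * Editorial usage sketch (not part of the patch): fetch the runtime-managed
 * singleton and use it for a direct call via the existing
 * wasm_runtime_call_wasm() API; module instantiation and function lookup are
 * elided, and the singleton must not be destroyed by the caller.
 */
static bool
call_on_exec_env_singleton(wasm_module_inst_t module_inst,
                           wasm_function_inst_t func, uint32_t argc,
                           uint32_t argv[])
{
    wasm_exec_env_t exec_env =
        wasm_runtime_get_exec_env_singleton(module_inst);

    if (!exec_env)
        return false;

    return wasm_runtime_call_wasm(exec_env, func, argc, argv);
}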
/**
* Start debug instance based on given execution environment.
* Note:
@ -1117,6 +1137,12 @@ WASM_RUNTIME_API_EXTERN const uint8_t *
wasm_runtime_get_custom_section(wasm_module_t const module_comm,
const char *name, uint32_t *len);
/**
* Get WAMR semantic version
*/
WASM_RUNTIME_API_EXTERN void
wasm_runtime_get_version(uint32_t *major, uint32_t *minor, uint32_t *patch);
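/*
 * Editorial usage sketch (not part of the patch): query and print the WAMR
 * semantic version through the declaration above.
 */
#include <stdio.h>

static void
print_wamr_version(void)
{
    uint32_t major = 0, minor = 0, patch = 0;

    wasm_runtime_get_version(&major, &minor, &patch);
    printf("WAMR %u.%u.%u\n", major, minor, patch);
}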
/* clang-format on */
#ifdef __cplusplus

View File

@ -122,6 +122,7 @@ typedef struct WASMType {
uint16 result_count;
uint16 param_cell_num;
uint16 ret_cell_num;
uint16 ref_count;
/* types of params and results */
uint8 types[1];
} WASMType;
@ -204,6 +205,10 @@ typedef struct WASMGlobalImport {
WASMModule *import_module;
WASMGlobal *import_global_linked;
#endif
#if WASM_ENABLE_FAST_JIT != 0
    /* The data offset of the current global in the global data */
uint32 data_offset;
#endif
} WASMGlobalImport;
typedef struct WASMImport {
@ -254,12 +259,19 @@ struct WASMFunction {
uint8 *consts;
uint32 const_cell_num;
#endif
#if WASM_ENABLE_FAST_JIT != 0
void *fast_jit_jitted_code;
#endif
};
struct WASMGlobal {
uint8 type;
bool is_mutable;
InitializerExpression init_expr;
#if WASM_ENABLE_FAST_JIT != 0
    /* The data offset of the current global in the global data */
uint32 data_offset;
#endif
};
typedef struct WASMExport {
@ -443,9 +455,12 @@ struct WASMModule {
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0
bh_list fast_opcode_list;
uint8 *buf_code;
uint64 buf_code_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0 \
|| WASM_ENABLE_FAST_JIT != 0
uint8 *load_addr;
uint64 load_size;
uint64 buf_code_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
@ -470,6 +485,11 @@ struct WASMModule {
#if WASM_ENABLE_LOAD_CUSTOM_SECTION != 0
WASMCustomSection *custom_section_list;
#endif
#if WASM_ENABLE_FAST_JIT != 0
    /* Pointers to the JITed functions */
void **fast_jit_func_ptrs;
#endif
};
typedef struct BlockType {
@ -593,6 +613,9 @@ wasm_value_type_cell_num_outside(uint8 value_type)
inline static bool
wasm_type_equal(const WASMType *type1, const WASMType *type2)
{
if (type1 == type2) {
return true;
}
return (type1->param_count == type2->param_count
&& type1->result_count == type2->result_count
&& memcmp(type1->types, type2->types,
@ -612,6 +635,7 @@ wasm_get_smallest_type_idx(WASMType **types, uint32 type_count,
if (wasm_type_equal(types[cur_type_idx], types[i]))
return i;
}
(void)type_count;
return cur_type_idx;
}

View File

@ -26,6 +26,10 @@ typedef struct WASMInterpFrame {
/* Instruction pointer of the bytecode array. */
uint8 *ip;
#if WASM_ENABLE_FAST_JIT != 0
uint8 *jitted_return_addr;
#endif
#if WASM_ENABLE_PERF_PROFILING != 0
uint64 time_started;
#endif
@ -47,12 +51,13 @@ typedef struct WASMInterpFrame {
WASMBranchBlock *csp_boundary;
WASMBranchBlock *csp;
/* Frame data, the layout is:
lp: param_cell_count + local_cell_count
sp_bottom to sp_boundary: stack of data
csp_bottom to csp_boundary: stack of block
ref to frame end: data types of local vairables and stack data
*/
/**
* Frame data, the layout is:
* lp: parameters and local variables
* sp_bottom to sp_boundary: wasm operand stack
* csp_bottom to csp_boundary: wasm label stack
* jit spill cache: only available for fast jit
*/
uint32 lp[1];
#endif
} WASMInterpFrame;
@ -84,6 +89,18 @@ wasm_interp_call_wasm(struct WASMModuleInstance *module_inst,
struct WASMFunctionInstance *function, uint32 argc,
uint32 argv[]);
/**
 * @brief Restore the wasm stack frame to the last native frame or the
 * beginning of the whole stack
 * @note e.g. for the stack "begin --> interp --> interp", it will go back to
 * "begin"; for the stack "begin --> interp --> native --> interp", it will
 * become "begin --> interp --> native"
*
* @param exec_env the execution environment
*/
void
wasm_interp_restore_wasm_frame(struct WASMExecEnv *exec_env);
#ifdef __cplusplus
}
#endif

View File

@ -16,6 +16,9 @@
#include "../libraries/thread-mgr/thread_manager.h"
#include "../libraries/debug-engine/debug_engine.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
typedef int32 CellType_I32;
typedef int64 CellType_I64;
@ -778,6 +781,26 @@ FREE_FRAME(WASMExecEnv *exec_env, WASMInterpFrame *frame)
wasm_exec_env_free_wasm_frame(exec_env, frame);
}
void
wasm_interp_restore_wasm_frame(WASMExecEnv *exec_env)
{
WASMInterpFrame *cur_frame, *prev_frame;
cur_frame = wasm_exec_env_get_cur_frame(exec_env);
while (cur_frame) {
prev_frame = cur_frame->prev_frame;
if (cur_frame->ip) {
            /* FREE_FRAME just sets the wasm_stack.s.top pointer, so we only
             * need to call it once */
FREE_FRAME(exec_env, cur_frame);
break;
}
cur_frame = prev_frame;
}
wasm_exec_env_set_cur_frame(exec_env, cur_frame);
}
static void
wasm_interp_call_func_native(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env,
@ -855,6 +878,20 @@ wasm_interp_call_func_native(WASMModuleInstance *module_inst,
wasm_exec_env_set_cur_frame(exec_env, prev_frame);
}
#if WASM_ENABLE_FAST_JIT != 0
bool
jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
WASMInterpFrame *prev_frame)
{
WASMModuleInstance *module_inst =
(WASMModuleInstance *)exec_env->module_inst;
WASMFunctionInstance *cur_func = module_inst->functions + func_idx;
wasm_interp_call_func_native(module_inst, exec_env, cur_func, prev_frame);
return wasm_get_exception(module_inst) ? false : true;
}
#endif
#if WASM_ENABLE_MULTI_MODULE != 0
static void
wasm_interp_call_func_bytecode(WASMModuleInstance *module,
@ -1354,7 +1391,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
tbl_inst = wasm_get_table_inst(module, tbl_idx);
val = POP_I32();
if (val < 0 || val >= (int32)tbl_inst->cur_size) {
if ((uint32)val >= tbl_inst->cur_size) {
wasm_set_exception(module, "undefined element");
goto got_exception;
}
@ -1382,10 +1419,12 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
cur_func_type = cur_func->u.func_import->func_type;
else
cur_func_type = cur_func->u.func->func_type;
if (!wasm_type_equal(cur_type, cur_func_type)) {
if (cur_type != cur_func_type) {
wasm_set_exception(module, "indirect call type mismatch");
goto got_exception;
}
#if WASM_ENABLE_TAIL_CALL != 0
if (opcode == WASM_OP_RETURN_CALL_INDIRECT)
goto call_func_from_return_call;
@ -3897,7 +3936,56 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
}
}
else {
#if WASM_ENABLE_FAST_JIT == 0
wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
#else
JitGlobals *jit_globals = jit_compiler_get_jit_globals();
JitInterpSwitchInfo info;
WASMType *func_type = function->u.func->func_type;
uint8 type = func_type->result_count
? func_type->types[func_type->param_count]
: VALUE_TYPE_VOID;
#if WASM_ENABLE_REF_TYPES != 0
if (type == VALUE_TYPE_EXTERNREF || type == VALUE_TYPE_FUNCREF)
type = VALUE_TYPE_I32;
#endif
info.out.ret.last_return_type = type;
info.frame = frame;
frame->jitted_return_addr =
(uint8 *)jit_globals->return_to_interp_from_jitted;
jit_interp_switch_to_jitted(exec_env, &info,
function->u.func->fast_jit_jitted_code);
if (func_type->result_count) {
switch (type) {
case VALUE_TYPE_I32:
*(frame->sp - function->ret_cell_num) =
info.out.ret.ival[0];
break;
case VALUE_TYPE_I64:
*(frame->sp - function->ret_cell_num) =
info.out.ret.ival[0];
*(frame->sp - function->ret_cell_num + 1) =
info.out.ret.ival[1];
break;
case VALUE_TYPE_F32:
*(frame->sp - function->ret_cell_num) =
info.out.ret.fval[0];
break;
case VALUE_TYPE_F64:
*(frame->sp - function->ret_cell_num) =
info.out.ret.fval[0];
*(frame->sp - function->ret_cell_num + 1) =
info.out.ret.fval[1];
break;
default:
bh_assert(0);
break;
}
}
(void)wasm_interp_call_func_bytecode;
#endif
}
/* Output the return value to the caller */

View File

@ -845,6 +845,26 @@ FREE_FRAME(WASMExecEnv *exec_env, WASMInterpFrame *frame)
wasm_exec_env_free_wasm_frame(exec_env, frame);
}
void
wasm_interp_restore_wasm_frame(WASMExecEnv *exec_env)
{
WASMInterpFrame *cur_frame, *prev_frame;
cur_frame = wasm_exec_env_get_cur_frame(exec_env);
while (cur_frame) {
prev_frame = cur_frame->prev_frame;
if (cur_frame->ip) {
            /* FREE_FRAME just sets the wasm_stack.s.top pointer, so we only
             * need to call it once */
FREE_FRAME(exec_env, cur_frame);
break;
}
cur_frame = prev_frame;
}
wasm_exec_env_set_cur_frame(exec_env, cur_frame);
}
static void
wasm_interp_call_func_native(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env,
@ -1296,7 +1316,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
val = GET_OPERAND(uint32, I32, 0);
frame_ip += 2;
if (val < 0 || val >= (int32)tbl_inst->cur_size) {
if ((uint32)val >= tbl_inst->cur_size) {
wasm_set_exception(module, "undefined element");
goto got_exception;
}
@ -1324,10 +1344,12 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
cur_func_type = cur_func->u.func_import->func_type;
else
cur_func_type = cur_func->u.func->func_type;
if (!wasm_type_equal(cur_type, cur_func_type)) {
if (cur_type != cur_func_type) {
wasm_set_exception(module, "indirect call type mismatch");
goto got_exception;
}
#if WASM_ENABLE_TAIL_CALL != 0
if (opcode == WASM_OP_RETURN_CALL_INDIRECT)
goto call_func_from_return_call;
@ -3925,6 +3947,7 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
wasm_interp_dump_call_stack(exec_env, true, NULL, 0);
}
#endif
LOG_DEBUG("meet an exception %s", wasm_get_exception(module_inst));
}
wasm_exec_env_set_cur_frame(exec_env, prev_frame);

View File

@ -14,6 +14,10 @@
#if WASM_ENABLE_DEBUG_INTERP != 0
#include "../libraries/debug-engine/debug_engine.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#include "../fast-jit/jit_codecache.h"
#endif
/* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the
@ -411,6 +415,19 @@ const_str_list_insert(const uint8 *str, uint32 len, WASMModule *module,
return node->str;
}
static void
destroy_wasm_type(WASMType *type)
{
if (type->ref_count > 1) {
        /* The type is referenced by other types
           of the current wasm module */
type->ref_count--;
return;
}
wasm_runtime_free(type);
}
static bool
load_init_expr(const uint8 **p_buf, const uint8 *buf_end,
InitializerExpression *init_expr, uint8 type, char *error_buf,
@ -578,6 +595,7 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
}
/* Resolve param types and result types */
type->ref_count = 1;
type->param_count = (uint16)param_count;
type->result_count = (uint16)result_count;
for (j = 0; j < param_count; j++) {
@ -607,6 +625,21 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
}
type->param_cell_num = (uint16)param_cell_num;
type->ret_cell_num = (uint16)ret_cell_num;
        /* If the same type has already been created, use it instead */
for (j = 0; j < i; j++) {
if (wasm_type_equal(type, module->types[j])) {
if (module->types[j]->ref_count == UINT16_MAX) {
set_error_buf(error_buf, error_buf_size,
"wasm type's ref count too large");
return false;
}
destroy_wasm_type(type);
module->types[i] = module->types[j];
module->types[j]->ref_count++;
break;
}
}
}
}
@ -2890,6 +2923,28 @@ fail:
return false;
}
#if WASM_ENABLE_FAST_JIT != 0
static void
calculate_global_data_offset(WASMModule *module)
{
uint32 i, data_offset;
data_offset = 0;
for (i = 0; i < module->import_global_count; i++) {
WASMGlobalImport *import_global =
&((module->import_globals + i)->u.global);
import_global->data_offset = data_offset;
data_offset += wasm_value_type_size(import_global->type);
}
for (i = 0; i < module->global_count; i++) {
WASMGlobal *global = module->globals + i;
global->data_offset = data_offset;
data_offset += wasm_value_type_size(global->type);
}
}
#endif
static bool
wasm_loader_prepare_bytecode(WASMModule *module, WASMFunction *func,
uint32 cur_func_idx, char *error_buf,
@ -3277,6 +3332,21 @@ load_from_sections(WASMModule *module, WASMSection *sections,
#endif
}
#if WASM_ENABLE_FAST_JIT != 0
calculate_global_data_offset(module);
if (module->function_count
&& !(module->fast_jit_func_ptrs =
loader_malloc(sizeof(void *) * module->function_count,
error_buf, error_buf_size))) {
return false;
}
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption((WASMModuleCommon *)module);
#endif
@ -3652,7 +3722,7 @@ wasm_loader_load(uint8 *buf, uint32 size,
return NULL;
}
#if WASM_ENABLE_DEBUG_INTERP != 0
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_FAST_JIT != 0
module->load_addr = (uint8 *)buf;
module->load_size = size;
#endif
@ -3688,7 +3758,7 @@ wasm_loader_unload(WASMModule *module)
if (module->types) {
for (i = 0; i < module->type_count; i++) {
if (module->types[i])
wasm_runtime_free(module->types[i]);
destroy_wasm_type(module->types[i]);
}
wasm_runtime_free(module->types);
}
@ -3800,6 +3870,16 @@ wasm_loader_unload(WASMModule *module)
wasm_runtime_destroy_custom_sections(module->custom_section_list);
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (module->fast_jit_func_ptrs) {
for (i = 0; i < module->function_count; i++) {
if (module->fast_jit_func_ptrs[i])
jit_code_cache_free(module->fast_jit_func_ptrs[i]);
}
wasm_runtime_free(module->fast_jit_func_ptrs);
}
#endif
wasm_runtime_free(module);
}
@ -4482,6 +4562,10 @@ typedef struct WASMLoaderContext {
uint8 *p_code_compiled;
uint8 *p_code_compiled_end;
uint32 code_compiled_size;
    /* If the last opcode is dropped, the peak memory usage will be larger
     * than the final code_compiled_size; we record the peak size to ensure
     * there will be no invalid memory access during the second traversal */
uint32 code_compiled_peak_size;
#endif
} WASMLoaderContext;
@ -5026,9 +5110,10 @@ static bool
wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
{
if (!(ctx->p_code_compiled =
loader_malloc(ctx->code_compiled_size, NULL, 0)))
loader_malloc(ctx->code_compiled_peak_size, NULL, 0)))
return false;
ctx->p_code_compiled_end = ctx->p_code_compiled + ctx->code_compiled_size;
ctx->p_code_compiled_end =
ctx->p_code_compiled + ctx->code_compiled_peak_size;
/* clean up frame ref */
memset(ctx->frame_ref_bottom, 0, ctx->frame_ref_size);
@ -5053,6 +5138,15 @@ wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
return true;
}
static void
increase_compiled_code_space(WASMLoaderContext *ctx, int32 size)
{
ctx->code_compiled_size += size;
if (ctx->code_compiled_size >= ctx->code_compiled_peak_size) {
ctx->code_compiled_peak_size = ctx->code_compiled_size;
}
}
static void
wasm_loader_emit_const(WASMLoaderContext *ctx, void *value, bool is_32_bit)
{
@ -5071,7 +5165,7 @@ wasm_loader_emit_const(WASMLoaderContext *ctx, void *value, bool is_32_bit)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += size;
increase_compiled_code_space(ctx, size);
}
}
@ -5089,7 +5183,7 @@ wasm_loader_emit_uint32(WASMLoaderContext *ctx, uint32 value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(uint32);
increase_compiled_code_space(ctx, sizeof(uint32));
}
}
@ -5107,7 +5201,7 @@ wasm_loader_emit_int16(WASMLoaderContext *ctx, int16 value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(int16);
increase_compiled_code_space(ctx, sizeof(uint16));
}
}
@ -5123,9 +5217,9 @@ wasm_loader_emit_uint8(WASMLoaderContext *ctx, uint8 value)
#endif
}
else {
ctx->code_compiled_size += sizeof(uint8);
increase_compiled_code_space(ctx, sizeof(uint8));
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
ctx->code_compiled_size++;
increase_compiled_code_space(ctx, sizeof(uint8));
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
}
@ -5145,7 +5239,7 @@ wasm_loader_emit_ptr(WASMLoaderContext *ctx, void *value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(void *);
increase_compiled_code_space(ctx, sizeof(void *));
}
}
@ -6864,14 +6958,14 @@ re_scan:
loader_ctx->frame_csp->end_addr = p - 1;
}
else {
/* end of function block, function will return,
ignore the following bytecodes */
p = p_end;
continue;
/* end of function block, function will return */
if (p < p_end) {
set_error_buf(error_buf, error_buf_size,
"section size mismatch");
goto fail;
}
}
SET_CUR_BLOCK_STACK_POLYMORPHIC_STATE(false);
break;
}
@ -7537,7 +7631,7 @@ re_scan:
goto fail;
}
if (func_idx == cur_func_idx) {
if (func_idx == cur_func_idx + module->import_function_count) {
WASMTableSeg *table_seg = module->table_segments;
bool func_declared = false;
uint32 j;
@ -7547,8 +7641,7 @@ re_scan:
if (table_seg->elem_type == VALUE_TYPE_FUNCREF
&& wasm_elem_is_declarative(table_seg->mode)) {
for (j = 0; j < table_seg->function_count; j++) {
if (table_seg->func_indexes[j]
== cur_func_idx) {
if (table_seg->func_indexes[j] == func_idx) {
func_declared = true;
break;
}
@ -7584,7 +7677,7 @@ re_scan:
PUSH_OFFSET_TYPE(local_type);
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_GET_LOCAL_FAST;
if (is_32bit_type(local_type)) {
@ -7648,7 +7741,7 @@ re_scan:
}
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_SET_LOCAL_FAST;
if (is_32bit_type(local_type)) {
@ -7708,7 +7801,7 @@ re_scan:
- wasm_value_type_cell_num(local_type)));
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_TEE_LOCAL_FAST;
if (is_32bit_type(local_type)) {

View File

@ -11,6 +11,10 @@
#include "wasm_runtime.h"
#include "../common/wasm_native.h"
#include "../common/wasm_memory.h"
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#include "../fast-jit/jit_codecache.h"
#endif
/* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the
@ -244,6 +248,19 @@ const_str_list_insert(const uint8 *str, uint32 len, WASMModule *module,
return node->str;
}
static void
destroy_wasm_type(WASMType *type)
{
if (type->ref_count > 1) {
        /* The type is referenced by other types
           of the current wasm module */
type->ref_count--;
return;
}
wasm_runtime_free(type);
}
static bool
load_init_expr(const uint8 **p_buf, const uint8 *buf_end,
InitializerExpression *init_expr, uint8 type, char *error_buf,
@ -368,6 +385,7 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
}
/* Resolve param types and result types */
type->ref_count = 1;
type->param_count = (uint16)param_count;
type->result_count = (uint16)result_count;
for (j = 0; j < param_count; j++) {
@ -390,6 +408,17 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
&& ret_cell_num <= UINT16_MAX);
type->param_cell_num = (uint16)param_cell_num;
type->ret_cell_num = (uint16)ret_cell_num;
        /* If the same type has already been created, use it instead */
for (j = 0; j < i; ++j) {
if (wasm_type_equal(type, module->types[j])) {
bh_assert(module->types[j]->ref_count != UINT16_MAX);
destroy_wasm_type(type);
module->types[i] = module->types[j];
module->types[j]->ref_count++;
break;
}
}
}
}
@ -1988,6 +2017,7 @@ load_from_sections(WASMModule *module, WASMSection *sections,
module->malloc_function = (uint32)-1;
module->free_function = (uint32)-1;
module->retain_function = (uint32)-1;
/* Resolve malloc/free function exported by wasm module */
export = module->exports;
@ -2138,6 +2168,18 @@ load_from_sections(WASMModule *module, WASMSection *sections,
}
}
#if WASM_ENABLE_FAST_JIT != 0
if (!(module->fast_jit_func_ptrs =
loader_malloc(sizeof(void *) * module->function_count, error_buf,
error_buf_size))) {
return false;
}
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption(module);
#endif
@ -2355,6 +2397,11 @@ wasm_loader_load(uint8 *buf, uint32 size, char *error_buf,
return NULL;
}
#if WASM_ENABLE_FAST_JIT != 0
module->load_addr = (uint8 *)buf;
module->load_size = size;
#endif
if (!load(buf, size, module, error_buf, error_buf_size)) {
goto fail;
}
@ -2378,7 +2425,7 @@ wasm_loader_unload(WASMModule *module)
if (module->types) {
for (i = 0; i < module->type_count; i++) {
if (module->types[i])
wasm_runtime_free(module->types[i]);
destroy_wasm_type(module->types[i]);
}
wasm_runtime_free(module->types);
}
@ -2452,6 +2499,16 @@ wasm_loader_unload(WASMModule *module)
}
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (module->fast_jit_func_ptrs) {
for (i = 0; i < module->function_count; i++) {
if (module->fast_jit_func_ptrs[i])
jit_code_cache_free(module->fast_jit_func_ptrs[i]);
}
wasm_runtime_free(module->fast_jit_func_ptrs);
}
#endif
wasm_runtime_free(module);
}
@ -3006,6 +3063,10 @@ typedef struct WASMLoaderContext {
uint8 *p_code_compiled;
uint8 *p_code_compiled_end;
uint32 code_compiled_size;
    /* If the last opcode is dropped, the peak memory usage will be larger
     * than the final code_compiled_size; we record the peak size to ensure
     * there will be no invalid memory access during the second traversal */
uint32 code_compiled_peak_size;
#endif
} WASMLoaderContext;
@ -3498,9 +3559,10 @@ static bool
wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
{
if (!(ctx->p_code_compiled =
loader_malloc(ctx->code_compiled_size, NULL, 0)))
loader_malloc(ctx->code_compiled_peak_size, NULL, 0)))
return false;
ctx->p_code_compiled_end = ctx->p_code_compiled + ctx->code_compiled_size;
ctx->p_code_compiled_end =
ctx->p_code_compiled + ctx->code_compiled_peak_size;
/* clean up frame ref */
memset(ctx->frame_ref_bottom, 0, ctx->frame_ref_size);
@ -3525,6 +3587,15 @@ wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
return true;
}
static void
increase_compiled_code_space(WASMLoaderContext *ctx, int32 size)
{
ctx->code_compiled_size += size;
if (ctx->code_compiled_size >= ctx->code_compiled_peak_size) {
ctx->code_compiled_peak_size = ctx->code_compiled_size;
}
}
static void
wasm_loader_emit_const(WASMLoaderContext *ctx, void *value, bool is_32_bit)
{
@ -3543,7 +3614,7 @@ wasm_loader_emit_const(WASMLoaderContext *ctx, void *value, bool is_32_bit)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += size;
increase_compiled_code_space(ctx, size);
}
}
@ -3561,7 +3632,7 @@ wasm_loader_emit_uint32(WASMLoaderContext *ctx, uint32 value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(uint32);
increase_compiled_code_space(ctx, sizeof(uint32));
}
}
@ -3579,7 +3650,7 @@ wasm_loader_emit_int16(WASMLoaderContext *ctx, int16 value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(int16);
increase_compiled_code_space(ctx, sizeof(uint16));
}
}
@ -3595,9 +3666,9 @@ wasm_loader_emit_uint8(WASMLoaderContext *ctx, uint8 value)
#endif
}
else {
ctx->code_compiled_size += sizeof(uint8);
increase_compiled_code_space(ctx, sizeof(uint8));
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
ctx->code_compiled_size++;
increase_compiled_code_space(ctx, sizeof(uint8));
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
}
@ -3617,7 +3688,7 @@ wasm_loader_emit_ptr(WASMLoaderContext *ctx, void *value)
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
bh_assert((ctx->code_compiled_size & 1) == 0);
#endif
ctx->code_compiled_size += sizeof(void *);
increase_compiled_code_space(ctx, sizeof(void *));
}
}
@ -5156,14 +5227,10 @@ re_scan:
loader_ctx->frame_csp->end_addr = p - 1;
}
else {
/* end of function block, function will return,
ignore the following bytecodes */
p = p_end;
continue;
/* end of function block, function will return */
bh_assert(p == p_end);
}
SET_CUR_BLOCK_STACK_POLYMORPHIC_STATE(false);
break;
}
@ -5731,7 +5798,7 @@ re_scan:
goto fail;
}
if (func_idx == cur_func_idx) {
if (func_idx == cur_func_idx + module->import_function_count) {
WASMTableSeg *table_seg = module->table_segments;
bool func_declared = false;
uint32 j;
@ -5741,8 +5808,7 @@ re_scan:
if (table_seg->elem_type == VALUE_TYPE_FUNCREF
&& wasm_elem_is_declarative(table_seg->mode)) {
for (j = 0; j < table_seg->function_count; j++) {
if (table_seg->func_indexes[j]
== cur_func_idx) {
if (table_seg->func_indexes[j] == func_idx) {
func_declared = true;
break;
}
@ -5777,7 +5843,8 @@ re_scan:
operand_offset = local_offset;
PUSH_OFFSET_TYPE(local_type);
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_GET_LOCAL_FAST;
if (is_32bit_type(local_type))
@ -5837,7 +5904,8 @@ re_scan:
POP_OFFSET_TYPE(local_type);
}
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_SET_LOCAL_FAST;
if (is_32bit_type(local_type))
@ -5893,7 +5961,8 @@ re_scan:
*(loader_ctx->frame_offset
- wasm_value_type_cell_num(local_type)));
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_TEE_LOCAL_FAST;
if (is_32bit_type(local_type))
@ -6467,7 +6536,7 @@ re_scan:
break;
case WASM_OP_MEMORY_COPY:
/* both src and dst memory index should be 0 */
bh_assert(*(int16 *)p != 0x0000);
bh_assert(*(int16 *)p == 0x0000);
p += 2;
bh_assert(module->import_memory_count

View File

@ -56,7 +56,7 @@ wasm_load(uint8 *buf, uint32 size, char *error_buf, uint32 error_buf_size)
WASMModule *
wasm_load_from_sections(WASMSection *section_list, char *error_buf,
uint32_t error_buf_size)
uint32 error_buf_size)
{
return wasm_loader_load_from_sections(section_list, error_buf,
error_buf_size);
@ -349,6 +349,24 @@ memory_instantiate(WASMModuleInstance *module_inst, uint32 num_bytes_per_page,
}
}
#if WASM_ENABLE_FAST_JIT != 0
if (memory_data_size > 0) {
#if UINTPTR_MAX == UINT64_MAX
memory->mem_bound_check_1byte = memory_data_size - 1;
memory->mem_bound_check_2bytes = memory_data_size - 2;
memory->mem_bound_check_4bytes = memory_data_size - 4;
memory->mem_bound_check_8bytes = memory_data_size - 8;
memory->mem_bound_check_16bytes = memory_data_size - 16;
#else
memory->mem_bound_check_1byte = (uint32)memory_data_size - 1;
memory->mem_bound_check_2bytes = (uint32)memory_data_size - 2;
memory->mem_bound_check_4bytes = (uint32)memory_data_size - 4;
memory->mem_bound_check_8bytes = (uint32)memory_data_size - 8;
memory->mem_bound_check_16bytes = (uint32)memory_data_size - 16;
#endif
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
if (0 != os_mutex_init(&memory->mem_lock)) {
set_error_buf(error_buf, error_buf_size, "init mutex failed");
@ -693,6 +711,10 @@ functions_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
function++;
}
#if WASM_ENABLE_FAST_JIT != 0
module_inst->fast_jit_func_ptrs = module->fast_jit_func_ptrs;
#endif
bh_assert((uint32)(function - functions) == function_count);
(void)module_inst;
return functions;
@ -765,13 +787,13 @@ globals_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
if (!(global->import_module_inst = get_sub_module_inst(
module_inst, global_import->import_module))) {
set_error_buf(error_buf, error_buf_size, "unknown global");
return NULL;
goto fail;
}
if (!(global->import_global_inst = wasm_lookup_global(
global->import_module_inst, global_import->field_name))) {
set_error_buf(error_buf, error_buf_size, "unknown global");
return NULL;
goto fail;
}
/* The linked global instance has been initialized, we
@ -807,7 +829,7 @@ globals_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
if (init_expr->init_expr_type == INIT_EXPR_TYPE_GET_GLOBAL) {
if (!check_global_init_expr(module, init_expr->u.global_index,
error_buf, error_buf_size)) {
return NULL;
goto fail;
}
bh_memcpy_s(
@ -831,6 +853,9 @@ globals_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
*p_global_data_size = global_data_offset;
(void)module_inst;
return globals;
fail:
wasm_runtime_free(globals);
return NULL;
}
/**
@ -952,7 +977,7 @@ execute_post_inst_function(WASMModuleInstance *module_inst)
return true;
return wasm_create_exec_env_and_call_function(module_inst, post_inst_func,
0, NULL, false);
0, NULL);
}
#if WASM_ENABLE_BULK_MEMORY != 0
@ -981,7 +1006,7 @@ execute_memory_init_function(WASMModuleInstance *module_inst)
return true;
return wasm_create_exec_env_and_call_function(module_inst, memory_init_func,
0, NULL, false);
0, NULL);
}
#endif
@ -996,8 +1021,7 @@ execute_start_function(WASMModuleInstance *module_inst)
bh_assert(!func->is_import_func && func->param_cell_num == 0
&& func->ret_cell_num == 0);
return wasm_create_exec_env_and_call_function(module_inst, func, 0, NULL,
false);
return wasm_create_exec_env_and_call_function(module_inst, func, 0, NULL);
}
static bool
@ -1041,11 +1065,11 @@ execute_malloc_function(WASMModuleInstance *module_inst,
#endif
{
ret = wasm_create_exec_env_and_call_function(module_inst, malloc_func,
argc, argv, false);
argc, argv);
if (retain_func && ret) {
ret = wasm_create_exec_env_and_call_function(
module_inst, retain_func, 1, argv, false);
ret = wasm_create_exec_env_and_call_function(module_inst,
retain_func, 1, argv);
}
}
@ -1074,7 +1098,7 @@ execute_free_function(WASMModuleInstance *module_inst,
#endif
{
return wasm_create_exec_env_and_call_function(module_inst, free_func, 1,
argv, false);
argv);
}
}
@ -1134,7 +1158,7 @@ sub_module_instantiate(WASMModule *module, WASMModuleInstance *module_inst,
wasm_lookup_function(sub_module_inst, "_initialize", NULL);
if (initialize
&& !wasm_create_exec_env_and_call_function(
sub_module_inst, initialize, 0, NULL, false)) {
sub_module_inst, initialize, 0, NULL)) {
set_error_buf(error_buf, error_buf_size,
"Call _initialize failed ");
goto failed;
@ -1964,6 +1988,15 @@ call_wasm_with_hw_bound_check(WASMModuleInstance *module_inst,
ret = false;
}
if (wasm_get_exception(module_inst)) {
#if WASM_ENABLE_DUMP_CALL_STACK != 0
if (wasm_interp_create_call_stack(exec_env)) {
wasm_interp_dump_call_stack(exec_env, true, NULL, 0);
}
#endif
wasm_interp_restore_wasm_frame(exec_env);
}
jmpbuf_node_pop = wasm_exec_env_pop_jmpbuf(exec_env);
bh_assert(&jmpbuf_node == jmpbuf_node_pop);
if (!exec_env->jmpbuf_stack_top) {
@ -1998,10 +2031,9 @@ wasm_call_function(WASMExecEnv *exec_env, WASMFunctionInstance *function,
bool
wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst,
WASMFunctionInstance *func,
unsigned argc, uint32 argv[],
bool enable_debug)
unsigned argc, uint32 argv[])
{
WASMExecEnv *exec_env, *existing_exec_env = NULL;
WASMExecEnv *exec_env = NULL, *existing_exec_env = NULL;
bool ret;
#if defined(OS_ENABLE_HW_BOUND_CHECK)
@ -2018,14 +2050,6 @@ wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst,
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
#if WASM_ENABLE_THREAD_MGR != 0
#if WASM_ENABLE_DEBUG_INTERP != 0
if (enable_debug) {
wasm_runtime_start_debug_instance(exec_env);
}
#endif
#endif
}
ret = wasm_call_function(exec_env, func, argc, argv);
@ -2470,6 +2494,22 @@ wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count)
memory->memory_data_end =
memory->memory_data + memory->num_bytes_per_page * total_page_count;
#if WASM_ENABLE_FAST_JIT != 0
#if UINTPTR_MAX == UINT64_MAX
memory->mem_bound_check_1byte = total_size - 1;
memory->mem_bound_check_2bytes = total_size - 2;
memory->mem_bound_check_4bytes = total_size - 4;
memory->mem_bound_check_8bytes = total_size - 8;
memory->mem_bound_check_16bytes = total_size - 16;
#else
memory->mem_bound_check_1byte = (uint32)total_size - 1;
memory->mem_bound_check_2bytes = (uint32)total_size - 2;
memory->mem_bound_check_4bytes = (uint32)total_size - 4;
memory->mem_bound_check_8bytes = (uint32)total_size - 8;
memory->mem_bound_check_16bytes = (uint32)total_size - 16;
#endif
#endif
return ret;
}
#else
@ -2564,14 +2604,14 @@ wasm_enlarge_table(WASMModuleInstance *module_inst, uint32 table_idx,
}
#endif /* WASM_ENABLE_REF_TYPES != 0 */
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
uint32_t element_indices, uint32_t argc, uint32_t argv[])
static bool
call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[], bool check_type_idx, uint32 type_idx)
{
WASMModuleInstance *module_inst = NULL;
WASMTableInstance *table_inst = NULL;
uint32_t function_indices = 0;
WASMFunctionInstance *function_inst = NULL;
uint32 func_idx = 0;
WASMFunctionInstance *func_inst = NULL;
module_inst = (WASMModuleInstance *)exec_env->module_inst;
bh_assert(module_inst);
@ -2582,7 +2622,7 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
goto got_exception;
}
if (element_indices >= table_inst->cur_size) {
if (elem_idx >= table_inst->cur_size) {
wasm_set_exception(module_inst, "undefined element");
goto got_exception;
}
@ -2591,8 +2631,8 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
* please be aware that table_inst->base_addr may point
* to another module's table
**/
function_indices = ((uint32_t *)table_inst->base_addr)[element_indices];
if (function_indices == NULL_REF) {
func_idx = ((uint32 *)table_inst->base_addr)[elem_idx];
if (func_idx == NULL_REF) {
wasm_set_exception(module_inst, "uninitialized element");
goto got_exception;
}
@ -2600,14 +2640,29 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
/**
     * we insist on calling functions owned by the module itself
**/
if (function_indices >= module_inst->function_count) {
if (func_idx >= module_inst->function_count) {
wasm_set_exception(module_inst, "unknown function");
goto got_exception;
}
function_inst = module_inst->functions + function_indices;
func_inst = module_inst->functions + func_idx;
interp_call_wasm(module_inst, exec_env, function_inst, argc, argv);
if (check_type_idx) {
WASMType *cur_type = module_inst->module->types[type_idx];
WASMType *cur_func_type;
if (func_inst->is_import_func)
cur_func_type = func_inst->u.func_import->func_type;
else
cur_func_type = func_inst->u.func->func_type;
if (cur_type != cur_func_type) {
wasm_set_exception(module_inst, "indirect call type mismatch");
goto got_exception;
}
}
interp_call_wasm(module_inst, exec_env, func_inst, argc, argv);
(void)clear_wasi_proc_exit_exception(module_inst);
return !wasm_get_exception(module_inst) ? true : false;
@ -2616,6 +2671,23 @@ got_exception:
return false;
}
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[])
{
return call_indirect(exec_env, tbl_idx, elem_idx, argc, argv, false, 0);
}
#if WASM_ENABLE_FAST_JIT != 0
bool
jit_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 type_idx, uint32 argc, uint32 argv[])
{
return call_indirect(exec_env, tbl_idx, elem_idx, argc, argv, true,
type_idx);
}
#endif
#if WASM_ENABLE_THREAD_MGR != 0
bool
wasm_set_aux_stack(WASMExecEnv *exec_env, uint32 start_offset, uint32 size)

View File

@ -52,6 +52,22 @@ struct WASMMemoryInstance {
Note: when memory is re-allocated, the heap data and memory data
must be copied to new memory also. */
uint8 *memory_data;
#if WASM_ENABLE_FAST_JIT != 0
#if UINTPTR_MAX == UINT64_MAX
uint64 mem_bound_check_1byte;
uint64 mem_bound_check_2bytes;
uint64 mem_bound_check_4bytes;
uint64 mem_bound_check_8bytes;
uint64 mem_bound_check_16bytes;
#else
uint32 mem_bound_check_1byte;
uint32 mem_bound_check_2bytes;
uint32 mem_bound_check_4bytes;
uint32 mem_bound_check_8bytes;
uint32 mem_bound_check_16bytes;
#endif
#endif
};
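/*
 * Editorial note (not part of the patch): the mem_bound_check_N fields are
 * precomputed in memory_instantiate()/wasm_enlarge_memory() earlier in this
 * patch as (memory data size - N), so JITed load/store code can presumably
 * validate an N-byte access at a wasm address offset with a single unsigned
 * comparison, roughly:
 *
 *     if (offset > memory->mem_bound_check_4bytes)
 *         goto out_of_bounds;  // a 4-byte access would run past the memory
 *
 * which avoids computing offset + N and the overflow that addition could
 * introduce.
 */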
struct WASMTableInstance {
@ -167,6 +183,10 @@ struct WASMModuleInstance {
/* Array of function pointers to import functions */
void **import_func_ptrs;
#if WASM_ENABLE_FAST_JIT != 0
    /* Pointers to the JITed functions */
void **fast_jit_func_ptrs;
#endif
WASMMemoryInstance **memories;
WASMTableInstance **tables;
@ -198,9 +218,6 @@ struct WASMModuleInstance {
WASMExecEnv *exec_env_singleton;
uint32 temp_ret;
uint32 llvm_stack;
/* Default WASM stack size of threads of this Module instance. */
uint32 default_wasm_stack_size;
@ -280,7 +297,7 @@ wasm_load(uint8 *buf, uint32 size, char *error_buf, uint32 error_buf_size);
WASMModule *
wasm_load_from_sections(WASMSection *section_list, char *error_buf,
uint32_t error_buf_size);
uint32 error_buf_size);
void
wasm_unload(WASMModule *module);
@ -317,8 +334,7 @@ wasm_call_function(WASMExecEnv *exec_env, WASMFunctionInstance *function,
bool
wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst,
WASMFunctionInstance *function,
unsigned argc, uint32 argv[],
bool enable_debug);
unsigned argc, uint32 argv[]);
bool
wasm_create_exec_env_singleton(WASMModuleInstance *module_inst);
@ -366,16 +382,22 @@ wasm_get_app_addr_range(WASMModuleInstance *module_inst, uint32 app_offset,
uint32 *p_app_start_offset, uint32 *p_app_end_offset);
bool
wasm_get_native_addr_range(WASMModuleInstance *module_inst, uint8_t *native_ptr,
uint8_t **p_native_start_addr,
uint8_t **p_native_end_addr);
wasm_get_native_addr_range(WASMModuleInstance *module_inst, uint8 *native_ptr,
uint8 **p_native_start_addr,
uint8 **p_native_end_addr);
bool
wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count);
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
uint32_t element_indices, uint32_t argc, uint32_t argv[]);
wasm_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[]);
#if WASM_ENABLE_FAST_JIT != 0
bool
jit_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 type_idx, uint32 argc, uint32 argv[]);
#endif
#if WASM_ENABLE_THREAD_MGR != 0
bool

View File

@ -5,6 +5,10 @@ set (LIB_PTHREAD_DIR ${CMAKE_CURRENT_LIST_DIR})
add_definitions (-DWASM_ENABLE_LIB_PTHREAD=1)
if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
add_definitions (-DWASM_ENABLE_LIB_PTHREAD_SEMAPHORE=1)
endif()
include_directories(${LIB_PTHREAD_DIR})
file (GLOB source_all ${LIB_PTHREAD_DIR}/*.c)

View File

@ -54,6 +54,7 @@ enum {
T_THREAD,
T_MUTEX,
T_COND,
T_SEM,
};
enum thread_status_t {
@ -73,6 +74,12 @@ enum cond_status_t {
COND_DESTROYED,
};
enum sem_status_t {
SEM_CREATED,
SEM_CLOSED,
SEM_DESTROYED,
};
typedef struct ThreadKeyValueNode {
bh_list_link l;
wasm_exec_env_t exec_env;
@ -111,6 +118,9 @@ typedef struct ThreadInfoNode {
korp_tid thread;
korp_mutex *mutex;
korp_cond *cond;
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
korp_sem *sem;
#endif
/* A copy of the thread return value */
void *ret;
} u;
@ -125,7 +135,15 @@ typedef struct {
wasm_module_inst_t module_inst;
} ThreadRoutineArgs;
typedef struct {
uint32 handle;
ThreadInfoNode *node;
} SemCallbackArgs;
static bh_list cluster_info_list;
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
static HashMap *sem_info_map;
#endif
static korp_mutex thread_global_lock;
static uint32 handle_id = 1;
@ -160,6 +178,12 @@ thread_info_destroy(void *node)
os_cond_destroy(info_node->u.cond);
wasm_runtime_free(info_node->u.cond);
}
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
else if (info_node->type == T_SEM) {
if (info_node->status != SEM_DESTROYED)
os_sem_close(info_node->u.sem);
}
#endif
wasm_runtime_free(info_node);
os_mutex_unlock(&thread_global_lock);
}
@ -174,12 +198,23 @@ lib_pthread_init()
os_mutex_destroy(&thread_global_lock);
return false;
}
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
if (!(sem_info_map = bh_hash_map_create(
32, true, (HashFunc)wasm_string_hash,
(KeyEqualFunc)wasm_string_equal, NULL, thread_info_destroy))) {
os_mutex_destroy(&thread_global_lock);
return false;
}
#endif
return true;
}
void
lib_pthread_destroy()
{
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
bh_hash_map_destroy(sem_info_map);
#endif
os_mutex_destroy(&thread_global_lock);
}
@ -1085,6 +1120,176 @@ posix_memalign_wrapper(wasm_exec_env_t exec_env, void **memptr, int32 align,
return 0;
}
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
static int32
sem_open_wrapper(wasm_exec_env_t exec_env, const char *name, int32 oflags,
int32 mode, int32 val)
{
korp_sem *psem = NULL;
ThreadInfoNode *info_node = NULL;
/**
* For an RTOS, the global semaphore map is safe for sharing the same
* semaphore between tasks/pthreads.
* For Unix-like systems, it is dedicated to sharing across multiple
* processes.
*/
if ((info_node = bh_hash_map_find(sem_info_map, (void *)name))) {
return info_node->handle;
}
if (!(psem = os_sem_open(name, oflags, mode, val))) {
goto fail1;
}
if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
goto fail2;
memset(info_node, 0, sizeof(ThreadInfoNode));
info_node->exec_env = exec_env;
info_node->handle = allocate_handle();
info_node->type = T_SEM;
info_node->u.sem = psem;
info_node->status = SEM_CREATED;
if (!bh_hash_map_insert(sem_info_map, (void *)name, info_node))
goto fail3;
return info_node->handle;
fail3:
wasm_runtime_free(info_node);
fail2:
os_sem_close(psem);
fail1:
return -1;
}
void
sem_fetch_cb(void *key, void *value, void *user_data)
{
(void)key;
SemCallbackArgs *args = user_data;
ThreadInfoNode *info_node = value;
if (args->handle == info_node->handle && info_node->status == SEM_CREATED) {
args->node = info_node;
}
}
static int32
sem_close_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
(void)exec_env;
int ret = -1;
SemCallbackArgs args = { sem, NULL };
bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);
if (args.node) {
ret = os_sem_close(args.node->u.sem);
if (ret == 0) {
args.node->status = SEM_CLOSED;
}
}
return ret;
}
static int32
sem_wait_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
(void)exec_env;
SemCallbackArgs args = { sem, NULL };
bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);
if (args.node) {
return os_sem_wait(args.node->u.sem);
}
return -1;
}
static int32
sem_trywait_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
(void)exec_env;
SemCallbackArgs args = { sem, NULL };
bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);
if (args.node) {
return os_sem_trywait(args.node->u.sem);
}
return -1;
}
static int32
sem_post_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
(void)exec_env;
SemCallbackArgs args = { sem, NULL };
bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);
if (args.node) {
return os_sem_post(args.node->u.sem);
}
return -1;
}
static int32
sem_getvalue_wrapper(wasm_exec_env_t exec_env, uint32 sem, int32 *sval)
{
int32 ret = -1;
wasm_module_inst_t module_inst = get_module_inst(exec_env);
(void)exec_env;
SemCallbackArgs args = { sem, NULL };
if (validate_native_addr(sval, sizeof(int32))) {
bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);
if (args.node) {
ret = os_sem_getvalue(args.node->u.sem, sval);
}
}
return ret;
}
static int32
sem_unlink_wrapper(wasm_exec_env_t exec_env, const char *name)
{
(void)exec_env;
int32 ret_val;
ThreadInfoNode *info_node = bh_hash_map_find(sem_info_map, (void *)name);
if (!info_node || info_node->type != T_SEM)
return -1;
if (info_node->status != SEM_CLOSED) {
ret_val = os_sem_close(info_node->u.sem);
if (ret_val != 0) {
return ret_val;
}
}
ret_val = os_sem_unlink(name);
if (ret_val == 0) {
bh_hash_map_remove(sem_info_map, (void *)name, NULL, NULL);
info_node->status = SEM_DESTROYED;
thread_info_destroy(info_node);
}
return ret_val;
}
#endif
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
{ #func_name, func_name##_wrapper, signature, NULL }
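The signature strings used with this macro follow WAMR's native-binding notation: 'i' is an i32, '*' a checked app pointer, '~' the byte length of the preceding pointer, and '$' a NUL-terminated string. As a hedged illustration of how the registrations below are formed, a made-up extra native would look like this (my_log is hypothetical, not part of this commit):

/* hypothetical native: "(i$)i" means (i32, string) -> i32 */
static int32
my_log_wrapper(wasm_exec_env_t exec_env, int32 level, const char *msg)
{
    (void)exec_env;
    os_printf("[level %d] %s\n", level, msg);
    return 0;
}

static NativeSymbol native_symbols_demo[] = {
    REG_NATIVE_FUNC(my_log, "(i$)i"),
};

/* such an array would then be registered with
 * wasm_runtime_register_natives("env", native_symbols_demo, 1); */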
@ -1113,6 +1318,15 @@ static NativeSymbol native_symbols_lib_pthread[] = {
REG_NATIVE_FUNC(pthread_getspecific, "(i)i"),
REG_NATIVE_FUNC(pthread_key_delete, "(i)i"),
REG_NATIVE_FUNC(posix_memalign, "(*ii)i"),
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
REG_NATIVE_FUNC(sem_open, "($iii)i"),
REG_NATIVE_FUNC(sem_close, "(i)i"),
REG_NATIVE_FUNC(sem_wait, "(i)i"),
REG_NATIVE_FUNC(sem_trywait, "(i)i"),
REG_NATIVE_FUNC(sem_post, "(i)i"),
REG_NATIVE_FUNC(sem_getvalue, "(i*)i"),
REG_NATIVE_FUNC(sem_unlink, "($)i"),
#endif
};
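On the guest side these symbols back the ordinary POSIX calls. A minimal wasm-app sketch, assuming the guest toolchain provides <semaphore.h>/<fcntl.h> declarations for these imports and the runtime is built with WAMR_BUILD_LIB_PTHREAD_SEMAPHORE=1:

#include <fcntl.h>     /* O_CREAT */
#include <semaphore.h>
#include <stdio.h>

int
main(void)
{
    /* the returned handle is backed by sem_open_wrapper above */
    sem_t *s = sem_open("demo_sem", O_CREAT, 0644, 1);
    if (s == SEM_FAILED) {
        printf("sem_open failed\n");
        return 1;
    }

    sem_wait(s);          /* decrement: enter the critical section */
    printf("in critical section\n");
    sem_post(s);          /* increment: release it */

    sem_close(s);
    sem_unlink("demo_sem");
    return 0;
}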
uint32

View File

@ -16,18 +16,6 @@
void
wasm_runtime_set_exception(wasm_module_inst_t module, const char *exception);
uint32
wasm_runtime_get_temp_ret(wasm_module_inst_t module);
void
wasm_runtime_set_temp_ret(wasm_module_inst_t module, uint32 temp_ret);
uint32
wasm_runtime_get_llvm_stack(wasm_module_inst_t module);
void
wasm_runtime_set_llvm_stack(wasm_module_inst_t module, uint32 llvm_stack);
uint32
wasm_runtime_module_realloc(wasm_module_inst_t module, uint32 ptr, uint32 size,
void **p_native_addr);
@ -841,87 +829,6 @@ isalnum_wrapper(wasm_exec_env_t exec_env, int32 c)
return isalnum(c);
}
static void
setTempRet0_wrapper(wasm_exec_env_t exec_env, uint32 temp_ret)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
wasm_runtime_set_temp_ret(module_inst, temp_ret);
}
static uint32
getTempRet0_wrapper(wasm_exec_env_t exec_env)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
return wasm_runtime_get_temp_ret(module_inst);
}
static uint32
llvm_bswap_i16_wrapper(wasm_exec_env_t exec_env, uint32 data)
{
return (data & 0xFFFF0000) | ((data & 0xFF) << 8) | ((data & 0xFF00) >> 8);
}
static uint32
llvm_bswap_i32_wrapper(wasm_exec_env_t exec_env, uint32 data)
{
return ((data & 0xFF) << 24) | ((data & 0xFF00) << 8)
| ((data & 0xFF0000) >> 8) | ((data & 0xFF000000) >> 24);
}
static uint32
bitshift64Lshr_wrapper(wasm_exec_env_t exec_env, uint32 uint64_part0,
uint32 uint64_part1, uint32 bits)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
union {
uint64 value;
uint32 parts[2];
} u;
u.parts[0] = uint64_part0;
u.parts[1] = uint64_part1;
u.value >>= bits;
/* return low 32bit and save high 32bit to temp ret */
wasm_runtime_set_temp_ret(module_inst, (uint32)(u.value >> 32));
return (uint32)u.value;
}
static uint32
bitshift64Shl_wrapper(wasm_exec_env_t exec_env, uint32 int64_part0,
uint32 int64_part1, uint32 bits)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
union {
int64 value;
uint32 parts[2];
} u;
u.parts[0] = int64_part0;
u.parts[1] = int64_part1;
u.value <<= bits;
/* return low 32bit and save high 32bit to temp ret */
wasm_runtime_set_temp_ret(module_inst, (uint32)(u.value >> 32));
return (uint32)u.value;
}
static void
llvm_stackrestore_wrapper(wasm_exec_env_t exec_env, uint32 llvm_stack)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
os_printf("_llvm_stackrestore called!\n");
wasm_runtime_set_llvm_stack(module_inst, llvm_stack);
}
static uint32
llvm_stacksave_wrapper(wasm_exec_env_t exec_env)
{
wasm_module_inst_t module_inst = get_module_inst(exec_env);
os_printf("_llvm_stacksave called!\n");
return wasm_runtime_get_llvm_stack(module_inst);
}
static uint32
emscripten_memcpy_big_wrapper(wasm_exec_env_t exec_env, void *dst,
const void *src, uint32 size)
@ -1108,14 +1015,6 @@ static NativeSymbol native_symbols_libc_builtin[] = {
REG_NATIVE_FUNC(tolower, "(i)i"),
REG_NATIVE_FUNC(toupper, "(i)i"),
REG_NATIVE_FUNC(isalnum, "(i)i"),
REG_NATIVE_FUNC(setTempRet0, "(i)"),
REG_NATIVE_FUNC(getTempRet0, "()i"),
REG_NATIVE_FUNC(llvm_bswap_i16, "(i)i"),
REG_NATIVE_FUNC(llvm_bswap_i32, "(i)i"),
REG_NATIVE_FUNC(bitshift64Lshr, "(iii)i"),
REG_NATIVE_FUNC(bitshift64Shl, "(iii)i"),
REG_NATIVE_FUNC(llvm_stackrestore, "(i)"),
REG_NATIVE_FUNC(llvm_stacksave, "()i"),
REG_NATIVE_FUNC(emscripten_memcpy_big, "(**~)i"),
REG_NATIVE_FUNC(abort, "(i)"),
REG_NATIVE_FUNC(abortStackOverflow, "(i)"),

View File

@ -25,8 +25,8 @@
// On Linux, prefer to use getrandom, though it isn't available in
// GLIBC before 2.25.
#if defined(__linux__) \
&& (!defined(__GLIBC__) || __GLIBC__ > 2 \
#if (defined(__linux__) || defined(ESP_PLATFORM)) \
&& (!defined(__GLIBC__) || __GLIBC__ > 2 \
|| (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 25))
#define CONFIG_HAS_GETRANDOM 1
#else
@ -39,13 +39,14 @@
#define CONFIG_HAS_CAP_ENTER 0
#endif
#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__EMSCRIPTEN__)
#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__EMSCRIPTEN__) \
&& !defined(ESP_PLATFORM)
#define CONFIG_HAS_CLOCK_NANOSLEEP 1
#else
#define CONFIG_HAS_CLOCK_NANOSLEEP 0
#endif
#if !defined(__APPLE__) && !defined(__FreeBSD__)
#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(ESP_PLATFORM)
#define CONFIG_HAS_FDATASYNC 1
#else
#define CONFIG_HAS_FDATASYNC 0
@ -63,13 +64,13 @@
#endif
#endif
#ifndef __APPLE__
#if !defined(__APPLE__) && !defined(ESP_PLATFORM)
#define CONFIG_HAS_POSIX_FALLOCATE 1
#else
#define CONFIG_HAS_POSIX_FALLOCATE 0
#endif
#ifndef __APPLE__
#if !defined(__APPLE__) && !defined(ESP_PLATFORM)
#define CONFIG_HAS_PREADV 1
#else
#define CONFIG_HAS_PREADV 0
@ -87,7 +88,7 @@
#define CONFIG_HAS_PTHREAD_CONDATTR_SETCLOCK 0
#endif
#ifndef __APPLE__
#if !defined(__APPLE__) && !defined(ESP_PLATFORM)
#define CONFIG_HAS_PWRITEV 1
#else
#define CONFIG_HAS_PWRITEV 0

View File

@ -55,6 +55,7 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef sem_t korp_sem;
#define os_thread_local_attribute __thread

View File

@ -195,6 +195,48 @@ os_cond_wait(korp_cond *cond, korp_mutex *mutex)
return BHT_OK;
}
korp_sem *
os_sem_open(const char *name, int oflags, int mode, int val)
{
return sem_open(name, oflags, mode, val);
}
int
os_sem_close(korp_sem *sem)
{
return sem_close(sem);
}
int
os_sem_wait(korp_sem *sem)
{
return sem_wait(sem);
}
int
os_sem_trywait(korp_sem *sem)
{
return sem_trywait(sem);
}
int
os_sem_post(korp_sem *sem)
{
return sem_post(sem);
}
int
os_sem_getvalue(korp_sem *sem, int *sval)
{
return sem_getvalue(sem, sval);
}
int
os_sem_unlink(const char *name)
{
return sem_unlink(name);
}
static void
msec_nsec_to_abstime(struct timespec *ts, uint64 usec)
{

View File

@ -56,6 +56,7 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef sem_t korp_sem;
#define os_thread_local_attribute __thread

View File

@ -58,3 +58,193 @@ os_usleep(uint32 usec)
{
return usleep(usec);
}
/* The readv & writev implementations below are ported from NuttX, under
* Apache License v2.0 */
ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
ssize_t ntotal;
ssize_t nread;
size_t remaining;
uint8_t *buffer;
int i;
/* Process each entry in the struct iovec array */
for (i = 0, ntotal = 0; i < iovcnt; i++) {
/* Ignore zero-length reads */
if (iov[i].iov_len > 0) {
buffer = iov[i].iov_base;
remaining = iov[i].iov_len;
/* Read repeatedly as necessary to fill buffer */
do {
/* NOTE: read() is a cancellation point */
nread = read(fildes, buffer, remaining);
/* Check for a read error */
if (nread < 0) {
return nread;
}
/* Check for an end-of-file condition */
else if (nread == 0) {
return ntotal;
}
/* Update pointers and counts in order to handle partial
* buffer reads.
*/
buffer += nread;
remaining -= nread;
ntotal += nread;
} while (remaining > 0);
}
}
return ntotal;
}
ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
ssize_t ntotal;
ssize_t nwritten;
size_t remaining;
uint8_t *buffer;
int i;
/* Process each entry in the struct iovec array */
for (i = 0, ntotal = 0; i < iovcnt; i++) {
/* Ignore zero-length writes */
if (iov[i].iov_len > 0) {
buffer = iov[i].iov_base;
remaining = iov[i].iov_len;
/* Write repeatedly as necessary to write the entire buffer */
do {
/* NOTE: write() is a cancellation point */
nwritten = write(fildes, buffer, remaining);
/* Check for a write error */
if (nwritten < 0) {
return ntotal ? ntotal : -1;
}
/* Update pointers and counts in order to handle partial
* buffer writes.
*/
buffer += nwritten;
remaining -= nwritten;
ntotal += nwritten;
} while (remaining > 0);
}
}
return ntotal;
}
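ESP-IDF's libc does not provide scatter-gather I/O, so the loops above emulate readv/writev with plain read()/write(). A small standard-POSIX usage sketch of the iovec contract they honor:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int
main(void)
{
    char head[] = "hello, ";
    char tail[] = "iovec\n";
    struct iovec iov[2] = {
        { .iov_base = head, .iov_len = strlen(head) },
        { .iov_base = tail, .iov_len = strlen(tail) },
    };

    /* one call, two buffers, written in order */
    ssize_t n = writev(STDOUT_FILENO, iov, 2);
    if (n < 0)
        perror("writev");
    return 0;
}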
int
openat(int fd, const char *path, int oflags, ...)
{
errno = ENOSYS;
return -1;
}
int
fstatat(int fd, const char *path, struct stat *buf, int flag)
{
errno = ENOSYS;
return -1;
}
int
mkdirat(int fd, const char *path, mode_t mode)
{
errno = ENOSYS;
return -1;
}
ssize_t
readlinkat(int fd, const char *path, char *buf, size_t bufsize)
{
errno = EINVAL;
return -1;
}
int
linkat(int fd1, const char *path1, int fd2, const char *path2, int flag)
{
errno = ENOSYS;
return -1;
}
int
renameat(int fromfd, const char *from, int tofd, const char *to)
{
errno = ENOSYS;
return -1;
}
int
symlinkat(const char *target, int fd, const char *path)
{
errno = ENOSYS;
return -1;
}
int
unlinkat(int fd, const char *path, int flag)
{
errno = ENOSYS;
return -1;
}
int
utimensat(int fd, const char *path, const struct timespec ts[2], int flag)
{
errno = ENOSYS;
return -1;
}
DIR *
fdopendir(int fd)
{
errno = ENOSYS;
return NULL;
}
int
ftruncate(int fd, off_t length)
{
errno = ENOSYS;
return -1;
}
int
futimens(int fd, const struct timespec times[2])
{
errno = ENOSYS;
return -1;
}
int
nanosleep(const struct timespec *req, struct timespec *rem)
{
errno = ENOSYS;
return -1;
}

View File

@ -44,6 +44,25 @@ os_mutex_init(korp_mutex *mutex)
return pthread_mutex_init(mutex, NULL);
}
int
os_recursive_mutex_init(korp_mutex *mutex)
{
int ret;
pthread_mutexattr_t mattr;
assert(mutex);
ret = pthread_mutexattr_init(&mattr);
if (ret)
return BHT_ERROR;
pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
ret = pthread_mutex_init(mutex, &mattr);
pthread_mutexattr_destroy(&mattr);
return ret == 0 ? BHT_OK : BHT_ERROR;
}
int
os_mutex_destroy(korp_mutex *mutex)
{
@ -206,3 +225,9 @@ os_cond_signal(korp_cond *cond)
{
return pthread_cond_signal(cond);
}
int
os_cond_broadcast(korp_cond *cond)
{
return pthread_cond_broadcast(cond);
}

View File

@ -17,6 +17,10 @@
#include <math.h>
#include <unistd.h>
#include <pthread.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <dirent.h>
#include "esp_pthread.h"
#include "esp_timer.h"
@ -35,12 +39,73 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef unsigned int korp_sem;
#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
/* Default thread priority */
#define BH_THREAD_DEFAULT_PRIORITY 5
/* Special value for tv_nsec field of timespec */
#define UTIME_NOW ((1l << 30) - 1l)
#ifndef __cplusplus
#define UTIME_OMIT ((1l << 30) - 2l)
#endif
#ifdef DT_UNKNOWN
#undef DT_UNKNOWN
#endif
#ifdef DT_REG
#undef DT_REG
#endif
#ifdef DT_DIR
#undef DT_DIR
#endif
/* The d_type definitions below are ported from NuttX, under Apache License
* v2.0 */
/* File type code for the d_type field in dirent structure.
* Note that because of NuttX's simplified, top-level pseudo-filesystem
* organization, an inode can be BOTH a file and a directory
*/
#define DTYPE_UNKNOWN 0
#define DTYPE_FIFO 1
#define DTYPE_CHR 2
#define DTYPE_SEM 3
#define DTYPE_DIRECTORY 4
#define DTYPE_MQ 5
#define DTYPE_BLK 6
#define DTYPE_SHM 7
#define DTYPE_FILE 8
#define DTYPE_MTD 9
#define DTYPE_LINK 10
#define DTYPE_SOCK 12
/* The d_type field of the dirent structure is not specified by POSIX. It
* is a non-standard, 4.5BSD extension that is implemented by most OSs. A
* POSIX-compliant OS may not implement the d_type field at all. Many OSs
* and C libraries (including glibc) use the following alternative names
* for the file types:
*/
#define DT_UNKNOWN DTYPE_UNKNOWN
#define DT_FIFO DTYPE_FIFO
#define DT_CHR DTYPE_CHR
#define DT_SEM DTYPE_SEM
#define DT_DIR DTYPE_DIRECTORY
#define DT_MQ DTYPE_MQ
#define DT_BLK DTYPE_BLK
#define DT_SHM DTYPE_SHM
#define DT_REG DTYPE_FILE
#define DT_MTD DTYPE_MTD
#define DT_LNK DTYPE_LINK
#define DT_SOCK DTYPE_SOCK
#ifdef __cplusplus
}
#endif

View File

@ -195,6 +195,91 @@ os_cond_signal(korp_cond *cond);
int
os_cond_broadcast(korp_cond *cond);
/**
* Creates a new POSIX-like semaphore or opens an existing
* semaphore. The semaphore is identified by name. For details of
* the construction of name, please refer to
* https://man7.org/linux/man-pages/man3/sem_open.3.html.
*
* @param name semaphore name
* @param oflags specifies flags that control the operation of the call
* @param mode permission flags
* @param val initial value of the named semaphore.
*
* @return korp_sem * if success, NULL otherwise
*/
korp_sem *
os_sem_open(const char *name, int oflags, int mode, int val);
/**
* Closes the named semaphore referred to by sem,
* allowing any resources that the system has allocated to the
* calling process for this semaphore to be freed.
*
* @param sem
*
* @return 0 if success
*/
int
os_sem_close(korp_sem *sem);
/**
* Decrements (locks) the semaphore pointed to by sem.
* If the semaphore's value is greater than zero, then the decrement
* proceeds, and the function returns, immediately. If the
* semaphore currently has the value zero, then the call blocks
* until either it becomes possible to perform the decrement (i.e.,
* the semaphore value rises above zero), or a signal handler
* interrupts the call.
*
* @return 0 if success
*/
int
os_sem_wait(korp_sem *sem);
/**
* Is the same as sem_wait(), except that if the
* decrement cannot be immediately performed, then the call returns an
* error (errno set to EAGAIN) instead of blocking.
*
* @return 0 if success
*/
int
os_sem_trywait(korp_sem *sem);
/**
* Increments (unlocks) the semaphore pointed to by sem.
* If the semaphore's value consequently becomes greater than zero,
* then another process or thread blocked in a sem_wait(3) call will
* be woken up and proceed to lock the semaphore.
*
* @return 0 if success
*/
int
os_sem_post(korp_sem *sem);
/**
* Places the current value of the semaphore pointed
* to by sem into the integer pointed to by sval.
*
* @return 0 if success
*/
int
os_sem_getvalue(korp_sem *sem, int *sval);
/**
* Removes the named semaphore referred to by name.
* The semaphore name is removed immediately. The semaphore is
* destroyed once all other processes that have the semaphore open
* close it.
*
* @param name semaphore name
*
* @return 0 if success
*/
int
os_sem_unlink(const char *name);
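A hedged platform-layer sketch of the lifecycle these declarations imply: open, post/wait, close, then unlink (error handling trimmed; assumes a POSIX-backed port and that <fcntl.h> supplies O_CREAT):

/* Illustrative fragment; assumes platform_api_extension.h and
 * <fcntl.h> are included and the port maps korp_sem to sem_t. */
static void
demo_sem_lifecycle(void)
{
    korp_sem *sem = os_sem_open("wamr_demo_sem", O_CREAT, 0644, 0);
    if (!sem)
        return;

    os_sem_post(sem);             /* make one unit available */
    if (os_sem_wait(sem) == 0) {  /* consume it */
        int val = 0;
        os_sem_getvalue(sem, &val);
        os_printf("semaphore value after wait: %d\n", val);
    }

    os_sem_close(sem);
    os_sem_unlink("wamr_demo_sem");
}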
/****************************************************
* Section 2 *
* Socket support *

View File

@ -50,8 +50,9 @@ typedef pthread_t korp_thread;
typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef unsigned int korp_sem;
typedef void (*os_print_function_t)(const char *message);
typedef int (*os_print_function_t)(const char *message);
void
os_set_print_function(os_print_function_t pf);

View File

@ -7,8 +7,6 @@
#include "platform_api_extension.h"
#include "sgx_rsrv_mem_mngr.h"
#define FIXED_BUFFER_SIZE (1 << 9)
static os_print_function_t print_function = NULL;
int
@ -57,31 +55,37 @@ os_set_print_function(os_print_function_t pf)
print_function = pf;
}
#define FIXED_BUFFER_SIZE 4096
int
os_printf(const char *message, ...)
{
int bytes_written = 0;
if (print_function != NULL) {
char msg[FIXED_BUFFER_SIZE] = { '\0' };
va_list ap;
va_start(ap, message);
vsnprintf(msg, FIXED_BUFFER_SIZE, message, ap);
va_end(ap);
print_function(msg);
bytes_written += print_function(msg);
}
return 0;
return bytes_written;
}
int
os_vprintf(const char *format, va_list arg)
{
int bytes_written = 0;
if (print_function != NULL) {
char msg[FIXED_BUFFER_SIZE] = { '\0' };
vsnprintf(msg, FIXED_BUFFER_SIZE, format, arg);
print_function(msg);
bytes_written += print_function(msg);
}
return 0;
return bytes_written;
}
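Because the callback type now returns int, os_printf/os_vprintf can report how many bytes the host side actually printed. A hedged sketch of an enclave-side hook (ocall_print_string is a hypothetical, application-defined ocall, not part of WAMR):

/* Hypothetical enclave-side print hook; the actual ocall an
 * application uses is defined in its own EDL. */
static int
enclave_print(const char *message)
{
    int n = 0;
    /* ocall_print_string is an assumed ocall that returns the number
     * of bytes written on the untrusted side via its retval pointer */
    if (ocall_print_string(&n, message) != 0 /* SGX_SUCCESS */)
        return 0;
    return n;
}

/* registration, typically done once at enclave initialization:
 * os_set_print_function(enclave_print); */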
char *

View File

@ -55,6 +55,7 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef sem_t korp_sem;
#define os_thread_local_attribute __thread

View File

@ -25,6 +25,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <semaphore.h>
#ifdef __cplusplus
extern "C" {
@ -38,6 +39,7 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef sem_t korp_sem;
#define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)

View File

@ -38,6 +38,7 @@
typedef thread_t korp_thread;
typedef kernel_pid_t korp_tid;
typedef mutex_t korp_mutex;
typedef unsigned int korp_sem;
/* typedef sema_t korp_sem; */

View File

@ -36,6 +36,7 @@ typedef rt_thread_t korp_tid;
typedef struct rt_mutex korp_mutex;
typedef struct rt_thread korp_cond;
typedef struct rt_thread korp_thread;
typedef unsigned int korp_sem;
typedef rt_uint8_t uint8_t;
typedef rt_int8_t int8_t;

View File

@ -54,6 +54,7 @@ typedef pthread_t korp_tid;
typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
typedef sem_t korp_sem;
#define os_thread_local_attribute __thread

Some files were not shown because too many files have changed in this diff.