Merge branch main into dev/wasi_threads

Wenyong Huang 2023-02-17 08:46:12 +08:00
commit e170c355a2
163 changed files with 7153 additions and 1857 deletions

View File

@ -12,7 +12,7 @@ ENV TZ=Asian/Shanghai
# hadolint ignore=DL3008
RUN apt-get update \
&& apt-get install -y apt-transport-https apt-utils build-essential \
ca-certificates curl g++-multilib git gnupg \
ca-certificates ccache curl g++-multilib git gnupg \
libgcc-9-dev lib32gcc-9-dev lsb-release \
ninja-build ocaml ocamlbuild python2.7 \
software-properties-common tree tzdata \
@ -20,6 +20,15 @@ RUN apt-get update \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/*
#
# binaryen
ARG BINARYEN_VER=111
WORKDIR /opt
RUN wget -c --progress=dot:giga https://github.com/WebAssembly/binaryen/releases/download/version_${BINARYEN_VER}/binaryen-version_${BINARYEN_VER}-x86_64-linux.tar.gz \
&& tar xf binaryen-version_${BINARYEN_VER}-x86_64-linux.tar.gz \
&& ln -sf /opt/binaryen-version_111 /opt/binaryen \
&& rm binaryen-version_${BINARYEN_VER}-x86_64-linux.tar.gz
#
# CMAKE (https://apt.kitware.com/)
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
@ -38,18 +47,19 @@ RUN wget --progress=dot:giga -O - https://apt.kitware.com/keys/kitware-archive-l
WORKDIR /opt
RUN git clone https://github.com/emscripten-core/emsdk.git
ARG EMSDK_VER=3.0.0
WORKDIR /opt/emsdk
RUN git pull \
&& ./emsdk install 2.0.26 \
&& ./emsdk activate 2.0.26 \
&& ./emsdk install ${EMSDK_VER} \
&& ./emsdk activate ${EMSDK_VER} \
&& echo "source /opt/emsdk/emsdk_env.sh" >> /root/.bashrc
#
# install wasi-sdk
ARG WASI_SDK_VER=16
ARG WASI_SDK_VER=19
RUN wget -c --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VER}/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -P /opt \
&& tar xf /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -C /opt \
&& ln -fs /opt/wasi-sdk-${WASI_SDK_VER}.0 /opt/wasi-sdk \
&& ln -sf /opt/wasi-sdk-${WASI_SDK_VER}.0 /opt/wasi-sdk \
&& rm /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz
#
@ -57,7 +67,7 @@ RUN wget -c --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases
ARG WABT_VER=1.0.29
RUN wget -c --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/${WABT_VER}/wabt-${WABT_VER}-ubuntu.tar.gz -P /opt \
&& tar xf /opt/wabt-${WABT_VER}-ubuntu.tar.gz -C /opt \
&& ln -fs /opt/wabt-${WABT_VER} /opt/wabt \
&& ln -sf /opt/wabt-${WABT_VER} /opt/wabt \
&& rm /opt/wabt-${WABT_VER}-ubuntu.tar.gz
#
@ -70,6 +80,8 @@ RUN mkdir /opt/bazelisk \
#
# install clang+llvm
ARG LLVM_VER=14
RUN apt-get purge -y clang-10 llvm-10 && apt autoremove -y
WORKDIR /etc/apt/apt.conf.d
RUN touch 99verfiy-peer.conf \
&& echo "Acquire { https::Verify-Peer false }" > 99verfiy-peer.conf
@ -77,9 +89,7 @@ RUN touch 99verfiy-peer.conf \
WORKDIR /tmp
RUN wget --progress=dot:giga https://apt.llvm.org/llvm.sh \
&& chmod a+x ./llvm.sh \
&& /tmp/llvm.sh 12 all \
&& ln -sf /usr/bin/clang-format-12 /usr/bin/clang-format \
&& rm -rf /tmp/*
&& ./llvm.sh ${LLVM_VER} all
#
# [Optional]
@ -96,17 +106,28 @@ RUN apt-get update \
# Install required python packages
# hadolint ignore=DL3013
RUN python3 -m pip install --no-cache-dir --upgrade pip \
&& pip3 install --no-cache-dir --user black nose pycparser pylint
&& pip3 install --no-cache-dir black nose pycparser pylint
# set path, PS and clean up
ENV PATH "/opt/bazelisk:/opt/clang-llvm/bin:${PATH}"
RUN echo "export PATH=/opt/bazelisk:/opt/clang-llvm/bin:${PATH}" >> /root/.bashrc \
&& printf "%s\n" "PS1='\n[ \u@wamr-dev-docker \W ]\n$ '" >> /root/.bashrc \
#
# Install github-cli. It doesn't work as a feature of devcontainer.json
RUN cd /tmp \
&& wget https://github.com/cli/cli/releases/download/v2.20.2/gh_2.20.2_linux_amd64.deb \
&& dpkg -i gh_2.20.2_linux_amd64.deb
#
# Install NodeJS
RUN curl -fsSL https://deb.nodesource.com/setup_19.x | bash -
RUN apt-get install -y nodejs
# set path
ENV PATH="/opt/bazelisk:/usr/lib/llvm-${LLVM_VER}/bin:${PATH}"
ENV CC=/usr/lib/llvm-${LLVM_VER}/bin/clang CXX=/usr/lib/llvm-${LLVM_VER}/bin/clang++
RUN printf "%s\n" "PS1='\n[ \u@wamr-dev-docker \W ]\n$ '" >> /root/.bashrc \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /tmp/*
# set workdir when container run
VOLUME /workspace
WORKDIR /workspace
VOLUME /workspaces
WORKDIR /workspaces

View File

@ -1,6 +1,5 @@
// Copyright (C) 2019 Intel Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.195.0/containers/cpp
{
@ -10,7 +9,12 @@
// Update 'VARIANT' to pick an Debian / Ubuntu OS version: debian-11, debian-10, debian-9, ubuntu-21.04, ubuntu-20.04, ubuntu-18.04
// Use Debian 11, Debian 9, Ubuntu 18.04 or Ubuntu 21.04 on local arm64/Apple Silicon
"args": {
"VARIANT": "ubuntu-20.04"
"BINARYEN_VER": "111",
"EMSDK_VER": "3.0.0",
"LLVM_VER": "15",
"VARIANT": "ubuntu-20.04",
"WASI_SDK_VER": "19",
"WABT_VER": "1.0.31"
}
},
"runArgs": [
@ -27,12 +31,10 @@
// Add the IDs of extensions you want installed when the container is created.
"extensions": [
"dtsvet.vscode-wasm",
"esbenp.prettier-vscode",
"llvm-vs-code-extensions.vscode-clangd",
"ms-python.python",
"ms-python.vscode-pylance",
"ms-vscode.cmake-tools",
"ms-vscode.cpptools",
"twxs.cmake"
]
}
},

View File

@ -36,7 +36,7 @@ jobs:
- name: generate iwasm binary release
run: |
cmake -S . -B build \
-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_JIT=0 \
-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 \
-DWAMR_BUILD_CUSTOM_NAME_SECTION=0 \
-DWAMR_BUILD_DEBUG_INTERP=0 \
-DWAMR_BUILD_DEBUG_AOT=0 \

View File

@ -5,23 +5,46 @@ name: Reusable workflow-build_llvm_libraries
on:
workflow_call:
inputs:
runs-on:
os:
required: true
type: string
arch:
required: true
type: string
outputs:
cache_key:
description: "A cached key of LLVM libraries"
value: ${{ jobs.build_llvm_libraries.outputs.key}}
jobs:
build_llvm_libraries:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: ${{ fromJson(inputs.runs-on) }}
runs-on: ${{ inputs.os }}
outputs:
key: ${{ steps.create_lib_cache_key.outputs.key}}
steps:
- name: checkout
uses: actions/checkout@v3
- name: install dependencies
run: /usr/bin/env python3 -m pip install -r requirements.txt
working-directory: build-scripts
- name: retrieve the last commit ID
id: get_last_commit
run: echo "last_commit=$(GH_TOKEN=${{ secrets.GITHUB_TOKEN }} /usr/bin/env python3 ./build_llvm.py --llvm-ver)" >> $GITHUB_OUTPUT
working-directory: build-scripts
# Bump the prefix number to evict all previous caches and
# enforce a clean build, in the unlikely case that some
# weird build error occurs and llvm/build becomes a potential
# suspect.
- name: form the cache key of libraries
id: create_lib_cache_key
run: echo "key=0-llvm-libraries-${{ inputs.os }}-${{ inputs.arch }}-${{ steps.get_last_commit.outputs.last_commit }}" >> $GITHUB_OUTPUT
- name: Cache LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -30,10 +53,39 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-build-llvm_libraries_ex
key: ${{ steps.create_lib_cache_key.outputs.key}}
- name: Build llvm
id: build_llvm
if: ${{ steps.cache_llvm.outputs.cache-hit != 'true' }}
run: /usr/bin/env python3 ./build_llvm.py --arch X86 WebAssembly
- uses: actions/cache@v3
with:
path: ~/.ccache
key: 0-ccache-${{ inputs.os }}-${{ steps.get_last_commit.outputs.last_commit }}
restore-keys: |
0-ccache-${{ inputs.os }}
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true' && inputs.os == 'ubuntu-20.04'
- uses: actions/cache@v3
with:
path: ~/.cache/ccache
key: 0-ccache-${{ inputs.os }}-${{ steps.get_last_commit.outputs.last_commit }}
restore-keys: |
0-ccache-${{ inputs.os }}
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true' && inputs.os == 'ubuntu-22.04'
- run: sudo apt install -y ccache ninja-build
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true' && startsWith(inputs.os, 'ubuntu')
- uses: actions/cache@v3
with:
path: ~/Library/Caches/ccache
key: 0-ccache-${{ inputs.os }}-${{ steps.get_last_commit.outputs.last_commit }}
restore-keys: |
0-ccache-${{ inputs.os }}
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true' && startsWith(inputs.os, 'macos')
- run: brew install ccache ninja
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true' && startsWith(inputs.os, 'macos')
- name: Build LLVM libraries
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: /usr/bin/env python3 ./build_llvm.py --arch ${{ inputs.arch }}
working-directory: build-scripts

View File

@ -15,7 +15,7 @@ concurrency:
cancel-in-progress: true
jobs:
complinace_job:
compliance_job:
runs-on: ubuntu-latest
steps:
- name: checkout

View File

@ -53,8 +53,7 @@ env:
FAST_JIT_BUILD_OPTIONS: " -DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=0 -DWAMR_BUILD_LAZY_JIT=0"
LLVM_LAZY_JIT_BUILD_OPTIONS: " -DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
# LLVM
LLVM_CACHE_SUFFIX: "build-llvm_libraries_ex"
MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
# For Spec Test
DEFAULT_TEST_OPTIONS: "-s spec -b -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -b -M -P"
@ -64,23 +63,37 @@ env:
WASI_TEST_OPTIONS: "-s wasi_certification"
jobs:
build_llvm_libraries:
build_llvm_libraries_on_ubuntu_2004:
uses: ./.github/workflows/build_llvm_libraries.yml
with:
runs-on: "['ubuntu-20.04', 'ubuntu-22.04']"
os: "ubuntu-20.04"
arch: "X86"
build_llvm_libraries_on_ubuntu_2204:
uses: ./.github/workflows/build_llvm_libraries.yml
with:
os: "ubuntu-22.04"
arch: "X86"
build_wamrc:
needs: [build_llvm_libraries]
needs:
[build_llvm_libraries_on_ubuntu_2004, build_llvm_libraries_on_ubuntu_2204]
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
include:
- os: ubuntu-20.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
- os: ubuntu-22.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v3
# since jobs.id can't contain the dot character
# it is hard to use `format` to assemble the cache key
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -89,10 +102,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: steps.cache_llvm.outputs.cache-hit != 'true'
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build wamrc
@ -103,7 +116,8 @@ jobs:
working-directory: wamr-compiler
build_iwasm:
needs: [build_llvm_libraries]
needs:
[build_llvm_libraries_on_ubuntu_2004, build_llvm_libraries_on_ubuntu_2204]
runs-on: ${{ matrix.os }}
strategy:
matrix:
@ -115,6 +129,7 @@ jobs:
$FAST_JIT_BUILD_OPTIONS,
$LLVM_LAZY_JIT_BUILD_OPTIONS,
$LLVM_EAGER_JIT_BUILD_OPTIONS,
$MULTI_TIER_JIT_BUILD_OPTIONS,
]
make_options_feature: [
# Features
@ -147,6 +162,8 @@ jobs:
make_options_feature: "-DWAMR_BUILD_MULTI_MODULE=1"
- make_options_run_mode: $LLVM_EAGER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MULTI_MODULE=1"
- make_options_run_mode: $MULTI_TIER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MULTI_MODULE=1"
# SIMD only on JIT/AOT mode
- make_options_run_mode: $CLASSIC_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_SIMD=1"
@ -163,6 +180,8 @@ jobs:
make_options_feature: "-DWAMR_BUILD_DEBUG_INTERP=1"
- make_options_run_mode: $LLVM_EAGER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_INTERP=1"
- make_options_run_mode: $MULTI_TIER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_INTERP=1"
# DEBUG_AOT only on JIT/AOT mode
- make_options_run_mode: $CLASSIC_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
@ -175,6 +194,8 @@ jobs:
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
- make_options_run_mode: $LLVM_EAGER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
- make_options_run_mode: $MULTI_TIER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
# MINI_LOADER only on INTERP mode
- make_options_run_mode: $AOT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
@ -184,16 +205,28 @@ jobs:
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
- make_options_run_mode: $LLVM_EAGER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
# Fast-JIT mode doesn't support android(X86-32)
- make_options_run_mode: $MULTI_TIER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
# Fast-JIT and Multi-Tier-JIT mode don't support android(X86-32)
- make_options_run_mode: $FAST_JIT_BUILD_OPTIONS
platform: android
- make_options_run_mode: $MULTI_TIER_JIT_BUILD_OPTIONS
platform: android
# only test android on ubuntu latest
- os: ubuntu-20.04
platform: android
include:
- os: ubuntu-20.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
- os: ubuntu-22.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v3
# only download llvm cache when needed
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS')
uses: actions/cache@v3
with:
@ -203,10 +236,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS') && (steps.cache_llvm.outputs.cache-hit != 'true')
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS') && (steps.retrieve_llvm_libs.outputs.cache-hit != 'true')
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build iwasm
@ -217,7 +250,13 @@ jobs:
working-directory: product-mini/platforms/${{ matrix.platform }}
build_samples_wasm_c_api:
needs: [build_iwasm, build_llvm_libraries, build_wamrc]
needs:
[
build_iwasm,
build_llvm_libraries_on_ubuntu_2004,
build_llvm_libraries_on_ubuntu_2204,
build_wamrc,
]
runs-on: ${{ matrix.os }}
strategy:
matrix:
@ -229,22 +268,28 @@ jobs:
$FAST_JIT_BUILD_OPTIONS,
$LLVM_LAZY_JIT_BUILD_OPTIONS,
$LLVM_EAGER_JIT_BUILD_OPTIONS,
$MULTI_TIER_JIT_BUILD_OPTIONS,
]
os: [ubuntu-20.04, ubuntu-22.04]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-16/wasi-sdk-16.0-linux.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz",
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz",
]
include:
- os: ubuntu-20.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
- os: ubuntu-22.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v3
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
if: (!endsWith(matrix.make_options, '_INTERP_BUILD_OPTIONS'))
uses: actions/cache@v3
with:
@ -254,18 +299,18 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: (!endsWith(matrix.make_options, '_INTERP_BUILD_OPTIONS')) && (steps.cache_llvm.outputs.cache-hit != 'true')
if: (!endsWith(matrix.make_options, '_INTERP_BUILD_OPTIONS')) && (steps.retrieve_llvm_libs.outputs.cache-hit != 'true')
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: download and install wabt
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: Build wamrc
if: (!endsWith(matrix.make_options, '_INTERP_BUILD_OPTIONS'))
@ -277,19 +322,9 @@ jobs:
- name: Build Sample [wasm-c-api]
run: |
mkdir build && cd build
cmake .. ${{ matrix.make_options }}
cmake --build . --config Release --parallel 4
./callback
./callback_chain
./empty_imports
./global
./hello
./hostref
./memory
./reflect
./table
./trap
cmake -S . -B build ${{ matrix.make_options }}
cmake --build build --config Release --parallel 4
ctest --test-dir build
working-directory: samples/wasm-c-api
build_samples_others:
@ -298,14 +333,13 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-16/wasi-sdk-16.0-linux.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz",
]
include:
- os: ubuntu-20.04
wasi_sdk_release: "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz"
wabt_release: "https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz"
- os: ubuntu-22.04
wasi_sdk_release: "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz"
wabt_release: "https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz"
steps:
- name: checkout
uses: actions/checkout@v3
@ -314,15 +348,15 @@ jobs:
run: |
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-16.0-*.tar.gz
sudo mv wasi-sdk-16.0 wasi-sdk
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
- name: download and install wabt
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: build wasi-libc (needed for wasi-threads)
run: |
@ -402,12 +436,19 @@ jobs:
./iwasm wasm-apps/no_pthread.wasm
test:
needs: [build_iwasm, build_llvm_libraries, build_wamrc]
needs: [build_iwasm, build_llvm_libraries_on_ubuntu_2004, build_wamrc]
runs-on: ubuntu-20.04
strategy:
matrix:
running_mode:
["classic-interp", "fast-interp", "jit", "aot", "fast-jit"]
[
"classic-interp",
"fast-interp",
"jit",
"aot",
"fast-jit",
"multi-tier-jit",
]
test_option:
[
$DEFAULT_TEST_OPTIONS,
@ -416,6 +457,8 @@ jobs:
$THREADS_TEST_OPTIONS,
$WASI_TEST_OPTIONS,
]
llvm_cache_key:
["${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}"]
exclude:
# incompatible modes and features
# classic-interp and fast-interp don't support simd
@ -431,32 +474,39 @@ jobs:
test_option: $WASI_TEST_OPTIONS
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit is only tested on default mode, exclude other three
# fast-jit doesn't support multi module, simd, and threads
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $THREADS_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd, and threads
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $THREADS_TEST_OPTIONS
steps:
- name: checkout
uses: actions/checkout@v3
- name: set env variable(if llvm are used)
if: matrix.running_mode == 'aot' || matrix.running_mode == 'jit'
if: matrix.running_mode == 'aot' || matrix.running_mode == 'jit' || matrix.running_mode == 'multi-tier-jit'
run: echo "USE_LLVM=true" >> $GITHUB_ENV
- name: set env variable(if x86_32 test needed)
if: >
(matrix.test_option == '$DEFAULT_TEST_OPTIONS' || matrix.test_option == '$THREADS_TEST_OPTIONS'
|| matrix.test_option == '$WASI_TEST_OPTIONS')
&& matrix.running_mode != 'fast-jit' && matrix.running_mode != 'jit'
&& matrix.running_mode != 'fast-jit' && matrix.running_mode != 'jit' && matrix.running_mode != 'multi-tier-jit'
run: echo "TEST_ON_X86_32=true" >> $GITHUB_ENV
#only download llvm libraries in jit and aot mode
- name: Get LLVM libraries
if: env.USE_LLVM == 'true'
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -465,10 +515,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ubuntu-20.04-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: env.USE_LLVM == 'true' && steps.cache_llvm.outputs.cache-hit != 'true'
if: env.USE_LLVM == 'true' && steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: run tests

View File

@ -51,26 +51,28 @@ env:
FAST_INTERP_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=0 -DWAMR_BUILD_FAST_INTERP=1 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_JIT=0 -DWAMR_BUILD_LAZY_JIT=0"
LLVM_LAZY_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
LLVM_CACHE_SUFFIX: "build-llvm_libraries_ex"
jobs:
build_llvm_libraries:
uses: ./.github/workflows/build_llvm_libraries.yml
with:
runs-on: "['macos-latest']"
os: "macos-latest"
arch: "X86"
build_wamrc:
needs: [build_llvm_libraries]
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-latest]
include:
- os: macos-latest
llvm_cache_key: ${{ needs.build_llvm_libraries.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v3
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -79,10 +81,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: steps.cache_llvm.outputs.cache-hit != 'true'
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build wamrc
@ -166,13 +168,16 @@ jobs:
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
- make_options_run_mode: $LLVM_EAGER_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
include:
- os: macos-latest
llvm_cache_key: ${{ needs.build_llvm_libraries.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v3
# only download llvm cache when needed
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS')
uses: actions/cache@v3
with:
@ -182,10 +187,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS') && (steps.cache_llvm.outputs.cache-hit != 'true')
if: endsWith(matrix.make_options_run_mode, '_JIT_BUILD_OPTIONS') && (steps.retrieve_llvm_libs.outputs.cache-hit != 'true')
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build iwasm
@ -210,8 +215,14 @@ jobs:
#$AOT_BUILD_OPTIONS,
]
os: [macos-latest]
wasi_sdk_release: ["https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-16/wasi-sdk-16.0-macos.tar.gz"]
wabt_release: ["https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-macos.tar.gz"]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-macos.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-macos-12.tar.gz",
]
steps:
- name: checkout
uses: actions/checkout@v3
@ -220,24 +231,14 @@ jobs:
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: Build Sample [wasm-c-api]
run: |
mkdir build && cd build
cmake .. ${{ matrix.make_options }}
cmake --build . --config Release --parallel 4
./callback
./callback_chain
./empty_imports
./global
./hello
./hostref
./memory
./reflect
./table
./trap
cmake -S . -B build ${{ matrix.make_options }}
cmake --build build --config Release --parallel 4
ctest --test-dir build
working-directory: samples/wasm-c-api
build_samples_others:
@ -246,8 +247,14 @@ jobs:
strategy:
matrix:
os: [macos-latest]
wasi_sdk_release: ["https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-16/wasi-sdk-16.0-macos.tar.gz"]
wabt_release: ["https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-macos.tar.gz"]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-macos.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-macos-12.tar.gz",
]
steps:
- name: checkout
uses: actions/checkout@v3
@ -256,15 +263,15 @@ jobs:
run: |
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-16.0-*.tar.gz
sudo mv wasi-sdk-16.0 wasi-sdk
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
- name: download and install wabt
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: build wasi-libc (needed for wasi-threads)
run: |

View File

@ -65,9 +65,12 @@ jobs:
"boards/risc-v/k210/maix-bit/configs/nsh",
]
wamr_config_option: [
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_FAST=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_FAST=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_FAST=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_WASI=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_FAST=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_CLASSIC=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_CLASSIC=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_WASI=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_CLASSIC=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_AOT=y\\n",
"CONFIG_INTERPRETERS_WAMR=y\\nCONFIG_INTERPRETERS_WAMR_FAST=y\\n",
@ -112,7 +115,7 @@ jobs:
- name: Enable WAMR for NuttX
run: |
find nuttx/boards -name defconfig | xargs sed -i '$a\CONFIG_EOL_IS_LF=y\n${{ matrix.wamr_config_option }}'
find nuttx/boards -name defconfig | xargs sed -i '$a\CONFIG_EOL_IS_LF=y\nCONFIG_PSEUDOFS_SOFTLINKS=y\n${{ matrix.wamr_config_option }}'
find nuttx/boards/sim -name defconfig | xargs sed -i '$a\CONFIG_LIBM=y\n'
- name: Build

View File

@ -51,13 +51,13 @@ env:
FAST_INTERP_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=0 -DWAMR_BUILD_FAST_INTERP=1 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_JIT=0 -DWAMR_BUILD_LAZY_JIT=0"
LLVM_LAZY_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
LLVM_CACHE_SUFFIX: "build-llvm_libraries_ex"
jobs:
build_llvm_libraries:
uses: ./.github/workflows/build_llvm_libraries.yml
with:
runs-on: "['ubuntu-20.04']"
os: "ubuntu-20.04"
arch: "X86"
build_iwasm:
runs-on: ${{ matrix.os }}
@ -131,7 +131,9 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04]
include:
- os: ubuntu-20.04
llvm_cache_key: ${{ needs.build_llvm_libraries.outputs.cache_key }}
steps:
- name: install SGX SDK and necessary libraries
run: |
@ -150,7 +152,7 @@ jobs:
uses: actions/checkout@v3
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -159,10 +161,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: steps.cache_llvm.outputs.cache-hit != 'true'
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build wamrc
@ -189,11 +191,11 @@ jobs:
os: [ubuntu-20.04]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz",
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz",
]
steps:
- name: checkout
@ -203,8 +205,8 @@ jobs:
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: install SGX SDK and necessary libraries
run: |
@ -221,19 +223,9 @@ jobs:
- name: Build Sample [wasm-c-api]
run: |
mkdir build && cd build
cmake .. ${{ matrix.make_options }}
cmake --build . --config Release --parallel 4
./callback
./callback_chain
./empty_imports
./global
./hello
./hostref
./memory
./reflect
./table
./trap
cmake -S . -B build ${{ matrix.make_options }}
cmake --build build --config Release --parallel 4
ctest --test-dir build
working-directory: samples/wasm-c-api
build_samples_others:
@ -244,11 +236,11 @@ jobs:
os: [ubuntu-20.04]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-16/wasi-sdk-16.0-linux.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.24/wabt-1.0.24-ubuntu.tar.gz",
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz",
]
steps:
- name: checkout
@ -258,15 +250,15 @@ jobs:
run: |
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-16.0-*.tar.gz
sudo mv wasi-sdk-16.0 wasi-sdk
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
- name: download and install wabt
run: |
cd /opt
sudo wget ${{ matrix.wabt_release }}
sudo tar -xzf wabt-1.0.24-*.tar.gz
sudo mv wabt-1.0.24 wabt
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: build wasi-libc (needed for wasi-threads)
run: |
@ -358,6 +350,7 @@ jobs:
matrix:
running_mode: ["classic-interp", "fast-interp", "aot"]
test_option: ["-x -p -s spec -b -P", "-x -p -s spec -S -b -P"]
llvm_cache_key: ["${{ needs.build_llvm_libraries.outputs.cache_key }}"]
# classic-interp and fast-interp don't support simd
exclude:
- running_mode: "classic-interp"
@ -371,7 +364,7 @@ jobs:
- name: Get LLVM libraries
if: matrix.running_mode == 'aot'
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -380,10 +373,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ubuntu-20.04-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: matrix.running_mode == 'aot' && steps.cache_llvm.outputs.cache-hit != 'true'
if: matrix.running_mode == 'aot' && steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: install SGX SDK and necessary libraries

View File

@ -123,7 +123,7 @@ jobs:
runner: ubuntu-20.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz
release_wamr_sdk_on_ubuntu_2204:
needs: [create_tag, create_release]
@ -133,7 +133,7 @@ jobs:
runner: ubuntu-22.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-linux.tar.gz
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz
release_wamr_sdk_on_macos:
needs: [create_tag, create_release]
@ -143,7 +143,7 @@ jobs:
runner: macos-latest
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-12/wasi-sdk-12.0-macos.tar.gz
wasi_sdk_url: https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-macos.tar.gz
#
# vscode extension cross-platform

View File

@ -16,7 +16,8 @@ jobs:
build_llvm_libraries:
uses: ./.github/workflows/build_llvm_libraries.yml
with:
runs-on: "['ubuntu-22.04']"
os: "ubuntu-22.04"
arch: "ARM RISCV AArch64"
spec_test_on_qemu:
runs-on: ${{ matrix.os }}
@ -37,6 +38,7 @@ jobs:
"-t aot",
"-t aot -X"
]
llvm_cache_key: [ "${{ needs.build_llvm_libraries.outputs.cache_key }}" ]
steps:
- name: Install Utilities
run: |
@ -72,7 +74,7 @@ jobs:
path: apps/interpreters/wamr/wamr
- name: Get LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -81,10 +83,10 @@ jobs:
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.os }}-${{ env.LLVM_CACHE_SUFFIX }}
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: steps.cache_llvm.outputs.cache-hit != 'true'
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Copy LLVM

3
.gitignore vendored
View File

@ -32,3 +32,6 @@ samples/socket-api/wasm-src/inc/pthread.h
**/__pycache__
tests/benchmarks/coremark/coremark*
samples/workload/include/**
!samples/workload/include/.gitkeep

View File

@ -7,6 +7,7 @@
import argparse
import os
import pathlib
import requests
import shlex
import shutil
import subprocess
@ -21,28 +22,43 @@ def clone_llvm(dst_dir, llvm_repo, llvm_branch):
llvm_dir = dst_dir.joinpath("llvm").resolve()
if not llvm_dir.exists():
print(f"Clone llvm to {llvm_dir} ...")
GIT_CLONE_CMD = f"git clone --depth 1 --branch {llvm_branch} {llvm_repo} llvm"
subprocess.check_output(shlex.split(GIT_CLONE_CMD), cwd=dst_dir)
else:
print(f"There is an LLVM local repo in {llvm_dir}, clean and keep using it")
return llvm_dir
def build_llvm(llvm_dir, platform, backends, projects):
def query_llvm_version(llvm_info):
github_token = os.environ['GH_TOKEN']
owner_project = llvm_info['repo'].replace("https://github.com/", "").replace(".git", "")
url = f"https://api.github.com/repos/{owner_project}/commits/{llvm_info['branch']}"
headers = {
'Authorization': f"Bearer {github_token}"
}
try:
response = requests.request("GET", url, headers=headers, data={})
response.raise_for_status()
except requests.exceptions.HTTPError as error:
print (error) # for debugging purpose
return None
response = response.json()
return response['sha']
def build_llvm(llvm_dir, platform, backends, projects, use_clang=False):
LLVM_COMPILE_OPTIONS = [
'-DCMAKE_BUILD_TYPE:STRING="Release"',
"-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
"-DLLVM_APPEND_VC_REV:BOOL=ON",
"-DLLVM_BUILD_BENCHMARKS:BOOL=OFF",
"-DLLVM_BUILD_DOCS:BOOL=OFF",
"-DLLVM_BUILD_EXAMPLES:BOOL=OFF",
"-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF",
"-DLLVM_BUILD_TESTS:BOOL=OFF",
"-DLLVM_CCACHE_BUILD:BOOL=OFF",
"-DLLVM_CCACHE_BUILD:BOOL=ON",
"-DLLVM_ENABLE_BINDINGS:BOOL=OFF",
"-DLLVM_ENABLE_IDE:BOOL=OFF",
"-DLLVM_ENABLE_LIBEDIT=OFF",
"-DLLVM_ENABLE_TERMINFO:BOOL=OFF",
"-DLLVM_ENABLE_ZLIB:BOOL=OFF",
"-DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF",
@ -54,6 +70,18 @@ def build_llvm(llvm_dir, platform, backends, projects):
"-DLLVM_OPTIMIZED_TABLEGEN:BOOL=ON",
]
# use clang/clang++/lld. but macos doesn't support lld
if not sys.platform.startswith("darwin") and use_clang:
if shutil.which("clang") and shutil.which("clang++") and shutil.which("lld"):
os.environ["CC"] = "clang"
os.environ["CXX"] = "clang++"
LLVM_COMPILE_OPTIONS.append('-DLLVM_USE_LINKER:STRING="lld"')
print("Use the clang toolchain")
else:
print("Can not find clang, clang++ and lld, keep using the gcc toolchain")
else:
print("Use the gcc toolchain")
LLVM_EXTRA_COMPILE_OPTIONS = {
"arc": [
'-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD:STRING="ARC"',
@ -99,8 +127,10 @@ def build_llvm(llvm_dir, platform, backends, projects):
lib_llvm_core_library = build_dir.joinpath("lib/libLLVMCore.a").resolve()
if lib_llvm_core_library.exists():
print(f"Please remove {build_dir} manually and try again")
return build_dir
print(
f"It has already been fully compiled. If want to a re-build, please remove {build_dir} manually and try again"
)
return None
compile_options = " ".join(
LLVM_COMPILE_OPTIONS
@ -119,10 +149,11 @@ def build_llvm(llvm_dir, platform, backends, projects):
CONFIG_CMD += " -G'Unix Makefiles'"
else:
CONFIG_CMD += " -A x64"
print(f"{CONFIG_CMD}")
else:
CONFIG_CMD += " -G'Ninja'"
subprocess.check_call(shlex.split(CONFIG_CMD), cwd=build_dir)
BUILD_CMD = f"cmake --build . --target package --parallel {os.cpu_count()}" + (
BUILD_CMD = "cmake --build . --target package" + (
" --config Release" if "windows" == platform else ""
)
subprocess.check_call(shlex.split(BUILD_CMD), cwd=build_dir)
@ -133,23 +164,25 @@ def build_llvm(llvm_dir, platform, backends, projects):
def repackage_llvm(llvm_dir):
build_dir = llvm_dir.joinpath("./build").resolve()
packs = [f for f in build_dir.glob("LLVM-13*.tar.gz")]
packs = [f for f in build_dir.glob("LLVM-*.tar.gz")]
if len(packs) > 1:
raise Exception("Find more than one LLVM-13*.tar.gz")
raise Exception("Find more than one LLVM-*.tar.gz")
if not packs:
return
llvm_package = packs[0].name
# mv build/LLVM-13.0.0*.gz .
# mv build/LLVM-*.gz .
shutil.move(str(build_dir.joinpath(llvm_package).resolve()), str(llvm_dir))
# rm -r build
shutil.rmtree(str(build_dir))
# mkdir build
build_dir.mkdir()
# tar xf ./LLVM-13.0.0-*.tar.gz --strip-components=1 --directory=build
# tar xf ./LLVM-*.tar.gz --strip-components=1 --directory=build
CMD = f"tar xf {llvm_dir.joinpath(llvm_package).resolve()} --strip-components=1 --directory={build_dir}"
subprocess.check_call(shlex.split(CMD), cwd=llvm_dir)
# rm ./LLVM-1*.gz
os.remove(llvm_dir.joinpath(llvm_package).resolve())
def main():
@ -184,8 +217,17 @@ def main():
choices=["clang", "lldb"],
help="identify extra LLVM projects, separate by space, like '--project clang lldb'",
)
parser.add_argument(
"--llvm-ver",
action="store_true",
help="return the version info of generated llvm libraries",
)
parser.add_argument(
"--use-clang",
action="store_true",
help="use clang instead of gcc",
)
options = parser.parse_args()
print(f"options={options}")
# if the "platform" is not identified in the command line option,
# detect it
@ -199,12 +241,10 @@ def main():
else:
platform = options.platform
print(f"========== Build LLVM for {platform} ==========\n")
llvm_repo_and_branch = {
"arc": {
"repo": "https://github.com/llvm/llvm-project.git",
"branch": "release/13.x",
"branch": "release/15.x",
},
"xtensa": {
"repo": "https://github.com/espressif/llvm-project.git",
@ -212,7 +252,7 @@ def main():
},
"default": {
"repo": "https://github.com/llvm/llvm-project.git",
"branch": "release/13.x",
"branch": "release/15.x",
},
}
@ -225,19 +265,22 @@ def main():
deps_dir = current_dir.joinpath("../core/deps").resolve()
try:
print(f"==================== CLONE LLVM ====================")
llvm_info = llvm_repo_and_branch.get(platform, llvm_repo_and_branch["default"])
if options.llvm_ver:
commit_hash = query_llvm_version(llvm_info)
print(commit_hash)
return commit_hash is not None
llvm_dir = clone_llvm(deps_dir, llvm_info["repo"], llvm_info["branch"])
print()
print(f"==================== BUILD LLVM ====================")
build_llvm(llvm_dir, platform, options.arch, options.project)
print()
print(f"==================== PACKAGE LLVM ====================")
if (
build_llvm(
llvm_dir, platform, options.arch, options.project, options.use_clang
)
is not None
):
repackage_llvm(llvm_dir)
print()
return True
except subprocess.CalledProcessError:
return False

View File

@ -0,0 +1 @@
requests==2.28.2

View File

@ -19,6 +19,11 @@ endif ()
if (NOT DEFINED DEPS_DIR)
set (DEPS_DIR ${WAMR_ROOT_DIR}/core/deps)
endif ()
if (NOT DEFINED SHARED_PLATFORM_CONFIG)
# CMake file for platform configuration. The PLATFORM_SHARED_SOURCE variable
# should point to a list of platform-specific source files to compile.
set (SHARED_PLATFORM_CONFIG ${SHARED_DIR}/platform/${WAMR_BUILD_PLATFORM}/shared_platform.cmake)
endif ()
if (DEFINED EXTRA_SDK_INCLUDE_PATH)
message(STATUS, "EXTRA_SDK_INCLUDE_PATH = ${EXTRA_SDK_INCLUDE_PATH} ")
@ -96,9 +101,13 @@ if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
if (NOT EXISTS "${WAMR_ROOT_DIR}/core/deps/tensorflow-src")
execute_process(COMMAND ${WAMR_ROOT_DIR}/core/deps/install_tensorflow.sh
RESULT_VARIABLE TENSORFLOW_RESULT
)
else ()
message("Tensorflow is already downloaded.")
endif()
set(TENSORFLOW_SOURCE_DIR "${WAMR_ROOT_DIR}/core/deps/tensorflow-src")
include_directories (${CMAKE_CURRENT_BINARY_DIR}/flatbuffers/include)
include_directories (${TENSORFLOW_SOURCE_DIR})
@ -169,7 +178,7 @@ LIST (APPEND RUNTIME_LIB_HEADER_LIST ${header})
enable_language (ASM)
include (${SHARED_DIR}/platform/${WAMR_BUILD_PLATFORM}/shared_platform.cmake)
include (${SHARED_PLATFORM_CONFIG})
include (${SHARED_DIR}/mem-alloc/mem_alloc.cmake)
include (${IWASM_DIR}/common/iwasm_common.cmake)
include (${SHARED_DIR}/utils/shared_utils.cmake)

View File

@ -30,6 +30,8 @@ bh_static_assert(offsetof(WASMExecEnv, aux_stack_boundary)
bh_static_assert(offsetof(WASMExecEnv, aux_stack_bottom)
== 7 * sizeof(uintptr_t));
bh_static_assert(offsetof(WASMExecEnv, native_symbol) == 8 * sizeof(uintptr_t));
bh_static_assert(offsetof(WASMExecEnv, native_stack_top_min)
== 9 * sizeof(uintptr_t));
bh_static_assert(offsetof(AOTModuleInstance, memories) == 1 * sizeof(uint64));
bh_static_assert(offsetof(AOTModuleInstance, func_ptrs) == 5 * sizeof(uint64));
@ -1083,6 +1085,17 @@ aot_instantiate(AOTModule *module, bool is_sub_inst, uint32 stack_size,
}
#endif
#if WASM_ENABLE_WASI_NN != 0
if (!is_sub_inst) {
if (!(((AOTModuleInstanceExtra *)module_inst->e)->wasi_nn_ctx =
wasi_nn_initialize())) {
set_error_buf(error_buf, error_buf_size,
"wasi nn initialization failed");
goto fail;
}
}
#endif
/* Initialize the thread related data */
if (stack_size == 0)
stack_size = DEFAULT_WASM_STACK_SIZE;
@ -1194,6 +1207,15 @@ aot_deinstantiate(AOTModuleInstance *module_inst, bool is_sub_inst)
wasm_runtime_free(
((AOTModuleInstanceExtra *)module_inst->e)->c_api_func_imports);
#if WASM_ENABLE_WASI_NN != 0
if (!is_sub_inst) {
WASINNContext *wasi_nn_ctx =
((AOTModuleInstanceExtra *)module_inst->e)->wasi_nn_ctx;
if (wasi_nn_ctx)
wasi_nn_destroy(wasi_nn_ctx);
}
#endif
wasm_runtime_free(module_inst);
}
@ -1237,6 +1259,7 @@ invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
/* Check native stack overflow firstly to ensure we have enough
native stack to run the following codes before actually calling
the aot function in invokeNative function. */
RECORD_STACK_USAGE(exec_env, (uint8 *)&module_inst);
if ((uint8 *)&module_inst < exec_env->native_stack_boundary
+ page_size * (guard_page_count + 1)) {
aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
@ -1836,6 +1859,7 @@ aot_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 table_elem_idx,
exec_env->native_stack_boundary must have been set, we don't set
it again */
RECORD_STACK_USAGE(exec_env, (uint8 *)&module_inst);
if ((uint8 *)&module_inst < exec_env->native_stack_boundary) {
aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
goto fail;

View File

@ -11,6 +11,10 @@
#include "../interpreter/wasm_runtime.h"
#include "../compilation/aot.h"
#if WASM_ENABLE_WASI_NN != 0
#include "../libraries/wasi-nn/src/wasi_nn_private.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
@ -75,6 +79,9 @@ typedef struct AOTFunctionInstance {
typedef struct AOTModuleInstanceExtra {
CApiFuncImport *c_api_func_imports;
#if WASM_ENABLE_WASI_NN != 0
WASINNContext *wasi_nn_ctx;
#endif
} AOTModuleInstanceExtra;
#if defined(OS_ENABLE_HW_BOUND_CHECK) && defined(BH_PLATFORM_WINDOWS)

View File

@ -16,9 +16,14 @@ _invokeNative:
push %ebp
movl %esp, %ebp
movl 16(%ebp), %ecx /* ecx = argc */
movl 12(%ebp), %edx /* edx = argv */
leal 2(%ecx), %edx /* edx = ecx + 2 (count return address and saved ebp) */
andl $3, %edx /* edx = edx % 4 */
jz stack_aligned /* if edx == 0, stack is already 16 bytes aligned */
leal -16(%esp, %edx, 4), %esp /* esp = esp - 16 + edx * 4 */
stack_aligned:
test %ecx, %ecx
jz skip_push_args /* if ecx == 0, skip pushing arguments */
movl 12(%ebp), %edx /* edx = argv */
leal -4(%edx,%ecx,4), %edx /* edx = edx + ecx * 4 - 4 */
subl %esp, %edx /* edx = edx - esp */
1:
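The hunk above makes invokeNative align esp to 16 bytes before pushing the call arguments, as the i386 SysV ABI expects at a call site. A minimal C sketch of the same arithmetic, with an illustrative helper name that is not part of the commit:

/* Illustrative only: mirrors the padding computed by the assembly above.
   The stack must end up 16-byte aligned counting the argc argument words
   plus the two words for the return address and the saved ebp. */
static unsigned
ia32_stack_padding_bytes(unsigned argc)
{
    unsigned spare_words = (argc + 2) & 3;         /* (argc + 2) % 4 */
    return spare_words ? 16 - spare_words * 4 : 0; /* bytes subtracted from esp */
}

For example, with argc = 1 this yields 4 bytes of padding, so the 3 pushed words (12 bytes) plus the padding total 16 bytes.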

View File

@ -276,7 +276,7 @@ WASM_DEFINE_VEC_OWN(store, wasm_store_delete)
WASM_DEFINE_VEC_OWN(valtype, wasm_valtype_delete)
#ifndef NDEBUG
#if WAMR_BUILD_MEMORY_PROFILING != 0
#if WASM_ENABLE_MEMORY_PROFILING != 0
#define WASM_C_DUMP_PROC_MEM() LOG_PROC_MEM()
#else
#define WASM_C_DUMP_PROC_MEM() (void)0
@ -398,7 +398,7 @@ wasm_engine_new_internal(mem_alloc_type_t type, const MemAllocOption *opts)
}
/* global engine instance */
static wasm_engine_t *singleton_engine = NULL;
static wasm_engine_t *singleton_engine;
#ifdef os_thread_local_attribute
/* categorize wasm_store_t as threads*/
static os_thread_local_attribute unsigned thread_local_stores_num = 0;
@ -1458,6 +1458,30 @@ wasm_importtype_type(const wasm_importtype_t *import_type)
return import_type->extern_type;
}
bool
wasm_importtype_is_linked(const wasm_importtype_t *import_type)
{
if (!import_type)
return false;
const wasm_name_t *module_name = wasm_importtype_module(import_type);
const wasm_name_t *field_name = wasm_importtype_name(import_type);
switch (wasm_externtype_kind(wasm_importtype_type(import_type))) {
case WASM_EXTERN_FUNC:
return wasm_runtime_is_import_func_linked(module_name->data,
field_name->data);
case WASM_EXTERN_GLOBAL:
return wasm_runtime_is_import_global_linked(module_name->data,
field_name->data);
case WASM_EXTERN_MEMORY:
case WASM_EXTERN_TABLE:
default:
break;
}
return false;
}
own wasm_exporttype_t *
wasm_exporttype_new(own wasm_byte_vec_t *name,
own wasm_externtype_t *extern_type)
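wasm_importtype_is_linked is a new public wasm-c-api extension: it reports whether WAMR can already resolve an import on its own (for example through a registered native symbol), so an embedder only has to supply the remaining imports at instantiation time. A hedged usage sketch, assuming WAMR's wasm_c_api.h (whose vectors expose num_elems) and a hypothetical helper name:

#include "wasm_c_api.h"

/* Hypothetical helper: count how many imports the embedder still has to
   provide, i.e. those the runtime does not already link internally. */
static size_t
count_unlinked_imports(const wasm_module_t *module)
{
    wasm_importtype_vec_t import_types = { 0 };
    size_t unlinked = 0;

    wasm_module_imports(module, &import_types);
    for (size_t i = 0; i < import_types.num_elems; i++) {
        if (!wasm_importtype_is_linked(import_types.data[i]))
            unlinked++;
    }
    wasm_importtype_vec_delete(&import_types);
    return unlinked;
}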
@ -2537,12 +2561,12 @@ wasm_module_imports(const wasm_module_t *module, own wasm_importtype_vec_t *out)
bh_assert(extern_type);
wasm_name_new_from_string(&module_name, module_name_rt);
wasm_name_new_from_string_nt(&module_name, module_name_rt);
if (strlen(module_name_rt) && !module_name.data) {
goto failed;
}
wasm_name_new_from_string(&name, field_name_rt);
wasm_name_new_from_string_nt(&name, field_name_rt);
if (strlen(field_name_rt) && !name.data) {
goto failed;
}
@ -2622,7 +2646,7 @@ wasm_module_exports(const wasm_module_t *module, wasm_exporttype_vec_t *out)
}
/* byte* -> wasm_byte_vec_t */
wasm_name_new_from_string(&name, export->name);
wasm_name_new_from_string_nt(&name, export->name);
if (strlen(export->name) && !name.data) {
goto failed;
}
@ -3008,6 +3032,20 @@ failed:
return NULL;
}
static wasm_func_t *
wasm_func_new_empty(wasm_store_t *store)
{
wasm_func_t *func = NULL;
if (!(func = malloc_internal(sizeof(wasm_func_t))))
goto failed;
func->store = store;
func->kind = WASM_EXTERN_FUNC;
RETURN_OBJ(func, wasm_func_delete)
}
void
wasm_func_delete(wasm_func_t *func)
{
@ -3211,7 +3249,8 @@ wasm_func_call(const wasm_func_t *func, const wasm_val_vec_t *params,
wasm_name_t message = { 0 };
wasm_trap_t *trap;
wasm_name_new_from_string(&message, "failed to call unlinked function");
wasm_name_new_from_string_nt(&message,
"failed to call unlinked function");
trap = wasm_trap_new(func->store, &message);
wasm_byte_vec_delete(&message);
@ -3371,6 +3410,25 @@ failed:
return NULL;
}
static wasm_global_t *
wasm_global_new_empty(wasm_store_t *store)
{
wasm_global_t *global = NULL;
global = malloc_internal(sizeof(wasm_global_t));
if (!global)
goto failed;
global->store = store;
global->kind = WASM_EXTERN_GLOBAL;
return global;
failed:
LOG_DEBUG("%s failed", __FUNCTION__);
wasm_global_delete(global);
return NULL;
}
/* almost same with wasm_global_new */
wasm_global_t *
wasm_global_copy(const wasm_global_t *src)
@ -4205,7 +4263,8 @@ wasm_memory_data_size(const wasm_memory_t *memory)
(WASMModuleInstance *)module_inst_comm;
WASMMemoryInstance *memory_inst =
module_inst->memories[memory->memory_idx_rt];
return memory_inst->cur_page_count * memory_inst->num_bytes_per_page;
return (size_t)memory_inst->cur_page_count
* memory_inst->num_bytes_per_page;
}
#endif
@ -4215,7 +4274,8 @@ wasm_memory_data_size(const wasm_memory_t *memory)
AOTMemoryInstance *memory_inst =
((AOTMemoryInstance **)
module_inst->memories)[memory->memory_idx_rt];
return memory_inst->cur_page_count * memory_inst->num_bytes_per_page;
return (size_t)memory_inst->cur_page_count
* memory_inst->num_bytes_per_page;
}
#endif
@ -4286,6 +4346,11 @@ interp_link_func(const wasm_instance_t *inst, const WASMModule *module_interp,
imported_func_interp = module_interp->import_functions + func_idx_rt;
bh_assert(imported_func_interp);
bh_assert(imported_func_interp->kind == IMPORT_KIND_FUNC);
/* it is a placeholder and let's skip it*/
if (!import->type)
return true;
/* type comparison */
if (!wasm_functype_same_internal(
@ -4300,6 +4365,8 @@ interp_link_func(const wasm_instance_t *inst, const WASMModule *module_interp,
imported_func_interp->u.function.func_ptr_linked = import->u.cb_env.cb;
else
imported_func_interp->u.function.func_ptr_linked = import->u.cb;
bh_assert(imported_func_interp->u.function.func_ptr_linked);
import->func_idx_rt = func_idx_rt;
(void)inst;
@ -4318,12 +4385,19 @@ interp_link_global(const WASMModule *module_interp, uint16 global_idx_rt,
imported_global_interp = module_interp->import_globals + global_idx_rt;
bh_assert(imported_global_interp);
bh_assert(imported_global_interp->kind == IMPORT_KIND_GLOBAL);
/* it is a placeholder and let's skip it*/
if (!import->type)
return true;
/* type comparison */
if (!cmp_val_kind_with_val_type(wasm_valtype_kind(import->type->val_type),
imported_global_interp->u.global.type))
return false;
/* set init value */
bh_assert(import->init);
switch (wasm_valtype_kind(import->type->val_type)) {
case WASM_I32:
imported_global_interp->u.global.global_data_linked.i32 =
@ -4350,58 +4424,6 @@ interp_link_global(const WASMModule *module_interp, uint16 global_idx_rt,
return true;
}
static bool
interp_link(const wasm_instance_t *inst, const WASMModule *module_interp,
wasm_extern_t *imports[])
{
uint32 i = 0;
uint32 import_func_i = 0;
uint32 import_global_i = 0;
bh_assert(inst && module_interp && imports);
for (i = 0; i < module_interp->import_count; ++i) {
wasm_extern_t *import = imports[i];
WASMImport *import_rt = module_interp->imports + i;
switch (import_rt->kind) {
case IMPORT_KIND_FUNC:
{
if (!interp_link_func(inst, module_interp, import_func_i,
wasm_extern_as_func(import))) {
LOG_WARNING("link #%d function failed", import_func_i);
goto failed;
}
import_func_i++;
break;
}
case IMPORT_KIND_GLOBAL:
{
if (!interp_link_global(module_interp, import_global_i,
wasm_extern_as_global(import))) {
LOG_WARNING("link #%d global failed", import_global_i);
goto failed;
}
import_global_i++;
break;
}
case IMPORT_KIND_MEMORY:
case IMPORT_KIND_TABLE:
default:
ASSERT_NOT_IMPLEMENTED();
LOG_WARNING("%s meets unsupported kind: %d", __FUNCTION__,
import_rt->kind);
goto failed;
}
}
return true;
failed:
LOG_DEBUG("%s failed", __FUNCTION__);
return false;
}
static bool
interp_process_export(wasm_store_t *store,
const WASMModuleInstance *inst_interp,
@ -4501,6 +4523,10 @@ aot_link_func(const wasm_instance_t *inst, const AOTModule *module_aot,
import_aot_func = module_aot->import_funcs + import_func_idx_rt;
bh_assert(import_aot_func);
/* it is a placeholder and let's skip it*/
if (!import->type)
return true;
/* type comparison */
if (!wasm_functype_same_internal(import->type, import_aot_func->func_type))
return false;
@ -4513,6 +4539,8 @@ aot_link_func(const wasm_instance_t *inst, const AOTModule *module_aot,
import_aot_func->func_ptr_linked = import->u.cb_env.cb;
else
import_aot_func->func_ptr_linked = import->u.cb;
bh_assert(import_aot_func->func_ptr_linked);
import->func_idx_rt = import_func_idx_rt;
return true;
@ -4530,6 +4558,10 @@ aot_link_global(const AOTModule *module_aot, uint16 global_idx_rt,
import_aot_global = module_aot->import_globals + global_idx_rt;
bh_assert(import_aot_global);
/* it is a placeholder and let's skip it*/
if (!import->type)
return true;
val_type = wasm_globaltype_content(import->type);
bh_assert(val_type);
@ -4537,6 +4569,7 @@ aot_link_global(const AOTModule *module_aot, uint16 global_idx_rt,
import_aot_global->type))
return false;
bh_assert(import->init);
switch (wasm_valtype_kind(val_type)) {
case WASM_I32:
import_aot_global->global_data_linked.i32 = import->init->of.i32;
@ -4557,62 +4590,6 @@ aot_link_global(const AOTModule *module_aot, uint16 global_idx_rt,
import->global_idx_rt = global_idx_rt;
import_aot_global->is_linked = true;
return true;
failed:
LOG_DEBUG("%s failed", __FUNCTION__);
return false;
}
static bool
aot_link(const wasm_instance_t *inst, const AOTModule *module_aot,
wasm_extern_t *imports[])
{
uint32 i = 0;
uint32 import_func_i = 0;
uint32 import_global_i = 0;
wasm_extern_t *import = NULL;
wasm_func_t *func = NULL;
wasm_global_t *global = NULL;
bh_assert(inst && module_aot && imports);
while (import_func_i < module_aot->import_func_count
|| import_global_i < module_aot->import_global_count) {
import = imports[i++];
bh_assert(import);
switch (wasm_extern_kind(import)) {
case WASM_EXTERN_FUNC:
bh_assert(import_func_i < module_aot->import_func_count);
func = wasm_extern_as_func((wasm_extern_t *)import);
if (!aot_link_func(inst, module_aot, import_func_i, func)) {
LOG_WARNING("link #%d function failed", import_func_i);
goto failed;
}
import_func_i++;
break;
case WASM_EXTERN_GLOBAL:
bh_assert(import_global_i < module_aot->import_global_count);
global = wasm_extern_as_global((wasm_extern_t *)import);
if (!aot_link_global(module_aot, import_global_i, global)) {
LOG_WARNING("link #%d global failed", import_global_i);
goto failed;
}
import_global_i++;
break;
case WASM_EXTERN_MEMORY:
case WASM_EXTERN_TABLE:
default:
ASSERT_NOT_IMPLEMENTED();
goto failed;
}
}
return true;
failed:
LOG_DEBUG("%s failed", __FUNCTION__);
return false;
@ -4693,7 +4670,7 @@ aot_process_export(wasm_store_t *store, const AOTModuleInstance *inst_aot,
goto failed;
}
wasm_name_new_from_string(external->name, export->name);
wasm_name_new_from_string_nt(external->name, export->name);
if (strlen(export->name) && !external->name->data) {
goto failed;
}
@ -4711,6 +4688,95 @@ failed:
}
#endif /* WASM_ENABLE_AOT */
static bool
do_link(const wasm_instance_t *inst, const wasm_module_t *module,
const wasm_extern_vec_t *imports)
{
uint32 i, import_func_i, import_global_i;
bh_assert(inst && module);
/* we have run a module_type check before. */
for (i = 0, import_func_i = 0, import_global_i = 0; i < imports->num_elems;
i++) {
wasm_extern_t *import = imports->data[i];
if (!import) {
LOG_ERROR("imports[%d] is NULL and it is fatal\n", i);
goto failed;
}
switch (wasm_extern_kind(import)) {
case WASM_EXTERN_FUNC:
{
bool ret = false;
#if WASM_ENABLE_INTERP != 0
if ((*module)->module_type == Wasm_Module_Bytecode) {
ret = interp_link_func(inst, MODULE_INTERP(module),
import_func_i,
wasm_extern_as_func(import));
}
#endif
#if WASM_ENABLE_AOT != 0
if ((*module)->module_type == Wasm_Module_AoT) {
ret = aot_link_func(inst, MODULE_AOT(module), import_func_i,
wasm_extern_as_func(import));
}
#endif
if (!ret) {
LOG_WARNING("link function #%d failed", import_func_i);
goto failed;
}
import_func_i++;
break;
}
case WASM_EXTERN_GLOBAL:
{
bool ret = false;
#if WASM_ENABLE_INTERP != 0
if ((*module)->module_type == Wasm_Module_Bytecode) {
ret = interp_link_global(MODULE_INTERP(module),
import_global_i,
wasm_extern_as_global(import));
}
#endif
#if WASM_ENABLE_AOT != 0
if ((*module)->module_type == Wasm_Module_AoT) {
ret = aot_link_global(MODULE_AOT(module), import_global_i,
wasm_extern_as_global(import));
}
#endif
if (!ret) {
LOG_WARNING("link global #%d failed", import_global_i);
goto failed;
}
import_global_i++;
break;
}
case WASM_EXTERN_MEMORY:
case WASM_EXTERN_TABLE:
{
LOG_WARNING("doesn't support import memories and tables for "
"now, ignore them");
break;
}
default:
{
UNREACHABLE();
break;
}
}
}
return true;
failed:
LOG_DEBUG("%s failed", __FUNCTION__);
return false;
}
wasm_instance_t *
wasm_instance_new(wasm_store_t *store, const wasm_module_t *module,
const wasm_extern_vec_t *imports, own wasm_trap_t **trap)
@ -4719,57 +4785,6 @@ wasm_instance_new(wasm_store_t *store, const wasm_module_t *module,
KILOBYTE(32), KILOBYTE(32));
}
static bool
compare_imports(const wasm_module_t *module, const wasm_extern_vec_t *imports)
{
unsigned import_func_count = 0;
unsigned import_global_count = 0;
unsigned import_memory_count = 0;
unsigned import_table_count = 0;
unsigned i = 0;
for (i = 0; imports && i < imports->num_elems; i++) {
wasm_extern_t *import = imports->data[i];
switch (wasm_extern_kind(import)) {
case WASM_EXTERN_FUNC:
import_func_count++;
break;
case WASM_EXTERN_GLOBAL:
import_global_count++;
break;
case WASM_EXTERN_MEMORY:
import_memory_count++;
break;
case WASM_EXTERN_TABLE:
import_table_count++;
break;
default:
UNREACHABLE();
return false;
}
}
#if WASM_ENABLE_INTERP != 0
if ((*module)->module_type == Wasm_Module_Bytecode)
return import_func_count == MODULE_INTERP(module)->import_function_count
&& import_global_count
== MODULE_INTERP(module)->import_global_count
&& import_memory_count
== MODULE_INTERP(module)->import_memory_count
&& import_table_count
== MODULE_INTERP(module)->import_table_count;
#endif
#if WASM_ENABLE_AOT != 0
if ((*module)->module_type == Wasm_Module_AoT)
return import_func_count == MODULE_AOT(module)->import_func_count
&& import_global_count == MODULE_AOT(module)->import_global_count
&& import_memory_count == MODULE_AOT(module)->import_memory_count
&& import_table_count == MODULE_AOT(module)->import_table_count;
#endif
return false;
}
wasm_instance_t *
wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
const wasm_extern_vec_t *imports,
@ -4779,7 +4794,6 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
char sub_error_buf[128] = { 0 };
char error_buf[256] = { 0 };
wasm_instance_t *instance = NULL;
WASMModuleInstance *inst_rt;
CApiFuncImport *func_import = NULL, **p_func_imports = NULL;
uint32 i = 0, import_func_count = 0;
uint64 total_size;
@ -4790,11 +4804,9 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
if (!module)
return NULL;
if (!compare_imports(module, imports)) {
snprintf(sub_error_buf, sizeof(sub_error_buf),
"Failed to match imports");
goto failed;
}
/*
* will do the check at the end of wasm_runtime_instantiate
*/
WASM_C_DUMP_PROC_MEM();
@ -4805,43 +4817,17 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
goto failed;
}
/* link module and imports */
if (imports && imports->num_elems) {
bool link = false;
#if WASM_ENABLE_INTERP != 0
if ((*module)->module_type == Wasm_Module_Bytecode) {
if (!interp_link(instance, MODULE_INTERP(module),
(wasm_extern_t **)imports->data)) {
/* executes the instantiate-time linking if provided */
if (imports) {
if (!do_link(instance, module, imports)) {
snprintf(sub_error_buf, sizeof(sub_error_buf),
"Failed to validate imports");
goto failed;
}
link = true;
}
#endif
#if WASM_ENABLE_AOT != 0
if ((*module)->module_type == Wasm_Module_AoT) {
if (!aot_link(instance, MODULE_AOT(module),
(wasm_extern_t **)imports->data)) {
snprintf(sub_error_buf, sizeof(sub_error_buf),
"Failed to validate imports");
goto failed;
}
link = true;
}
#endif
/*
* a wrong combination of module filetype and compilation flags
* also leads to below branch
* will do the linking result check at the end of wasm_runtime_instantiate
*/
if (!link) {
snprintf(sub_error_buf, sizeof(sub_error_buf),
"Failed to verify import count");
goto failed;
}
}
instance->inst_comm_rt = wasm_runtime_instantiate(
*module, stack_size, heap_size, sub_error_buf, sizeof(sub_error_buf));
@ -4856,18 +4842,22 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
}
/* create the c-api func import list */
inst_rt = (WASMModuleInstance *)instance->inst_comm_rt;
#if WASM_ENABLE_INTERP != 0
if (instance->inst_comm_rt->module_type == Wasm_Module_Bytecode) {
p_func_imports = &inst_rt->e->c_api_func_imports;
import_func_count = inst_rt->module->import_function_count;
WASMModuleInstanceExtra *e =
((WASMModuleInstance *)instance->inst_comm_rt)->e;
p_func_imports = &(e->c_api_func_imports);
import_func_count = MODULE_INTERP(module)->import_function_count;
}
#endif
#if WASM_ENABLE_AOT != 0
if (instance->inst_comm_rt->module_type == Wasm_Module_AoT) {
p_func_imports =
&((AOTModuleInstanceExtra *)inst_rt->e)->c_api_func_imports;
import_func_count = ((AOTModule *)inst_rt->module)->import_func_count;
AOTModuleInstanceExtra *e =
(AOTModuleInstanceExtra *)((AOTModuleInstance *)
instance->inst_comm_rt)
->e;
p_func_imports = &(e->c_api_func_imports);
import_func_count = MODULE_AOT(module)->import_func_count;
}
#endif
bh_assert(p_func_imports);
@ -4880,16 +4870,21 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
goto failed;
}
/* fill in c-api func import list */
/* fill in module_inst->e->c_api_func_imports */
for (i = 0; imports && i < imports->num_elems; i++) {
wasm_func_t *func_host;
wasm_extern_t *in;
wasm_func_t *func_host = NULL;
wasm_extern_t *in = imports->data[i];
bh_assert(in);
in = imports->data[i];
if (wasm_extern_kind(in) != WASM_EXTERN_FUNC)
continue;
func_host = wasm_extern_as_func(in);
/* it is a placeholder and let's skip it */
if (!func_host->type) {
func_import++;
continue;
}
func_import->with_env_arg = func_host->with_env;
if (func_host->with_env) {
@ -4900,6 +4895,7 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
func_import->func_ptr_linked = func_host->u.cb;
func_import->env_arg = NULL;
}
bh_assert(func_import->func_ptr_linked);
func_import++;
}
@ -4907,6 +4903,8 @@ wasm_instance_new_with_args(wasm_store_t *store, const wasm_module_t *module,
/* fill with inst */
for (i = 0; imports && imports->data && i < imports->num_elems; ++i) {
wasm_extern_t *import = imports->data[i];
bh_assert(import);
switch (import->kind) {
case WASM_EXTERN_FUNC:
wasm_extern_as_func(import)->inst_comm_rt =
@ -5002,7 +5000,7 @@ failed:
sub_error_buf);
if (trap != NULL) {
wasm_message_t message = { 0 };
wasm_name_new_from_string(&message, error_buf);
wasm_name_new_from_string_nt(&message, error_buf);
*trap = wasm_trap_new(store, &message);
wasm_byte_vec_delete(&message);
}
@ -5202,3 +5200,16 @@ BASIC_FOUR_LIST(WASM_EXTERN_AS_OTHER_CONST)
BASIC_FOUR_LIST(WASM_OTHER_AS_EXTERN_CONST)
#undef WASM_OTHER_AS_EXTERN_CONST
wasm_extern_t *
wasm_extern_new_empty(wasm_store_t *store, wasm_externkind_t extern_kind)
{
if (extern_kind == WASM_EXTERN_FUNC)
return wasm_func_as_extern(wasm_func_new_empty(store));
if (extern_kind == WASM_EXTERN_GLOBAL)
return wasm_global_as_extern(wasm_global_new_empty(store));
LOG_ERROR("Don't support linking table and memory for now");
return NULL;
}

View File

@ -211,6 +211,7 @@ wasm_exec_env_set_thread_info(WASMExecEnv *exec_env)
exec_env->handle = os_self_thread();
exec_env->native_stack_boundary =
stack_boundary ? stack_boundary + WASM_STACK_GUARD_SIZE : NULL;
exec_env->native_stack_top_min = (void *)UINTPTR_MAX;
}
#if WASM_ENABLE_THREAD_MGR != 0

View File

@ -84,6 +84,12 @@ typedef struct WASMExecEnv {
void **native_symbol;
#endif
/*
* The lowest stack pointer value observed.
* Assumption: the native stack grows toward lower addresses.
*/
uint8 *native_stack_top_min;
#if WASM_ENABLE_FAST_JIT != 0
/**
* Cache for
@ -165,6 +171,17 @@ typedef struct WASMExecEnv {
} wasm_stack;
} WASMExecEnv;
#if WASM_ENABLE_MEMORY_PROFILING != 0
#define RECORD_STACK_USAGE(e, p) \
do { \
if ((e)->native_stack_top_min > (p)) { \
(e)->native_stack_top_min = (p); \
} \
} while (0)
#else
#define RECORD_STACK_USAGE(e, p) (void)0
#endif
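A usage sketch for the macro above (hypothetical helper; the real call sites in this commit pass the address of a local variable near function entry, e.g. RECORD_STACK_USAGE(exec_env, (uint8 *)&prev_frame) in the interpreter):
static inline void
record_entry_stack_usage(WASMExecEnv *exec_env)
{
    /* the address of a local approximates the current native stack top */
    uint8 stack_marker = 0;
    RECORD_STACK_USAGE(exec_env, &stack_marker);
}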
WASMExecEnv *
wasm_exec_env_create_internal(struct WASMModuleInstanceCommon *module_inst,
uint32 stack_size);

View File

@ -250,6 +250,10 @@ lookup_symbol(NativeSymbol *native_symbols, uint32 n_native_symbols,
return NULL;
}
/**
* allow func_type and all outputs, like p_signature, p_attachment and
* p_call_conv_raw to be NULL
*/
void *
wasm_native_resolve_symbol(const char *module_name, const char *field_name,
const WASMType *func_type, const char **p_signature,
@ -275,10 +279,13 @@ wasm_native_resolve_symbol(const char *module_name, const char *field_name,
node = node_next;
}
if (!p_signature || !p_attachment || !p_call_conv_raw)
return func_ptr;
if (func_ptr) {
if (signature && signature[0] != '\0') {
/* signature is not empty, check its format */
if (!check_symbol_signature(func_type, signature)) {
if (!func_type || !check_symbol_signature(func_type, signature)) {
#if WASM_ENABLE_WAMR_COMPILER == 0
/* Output warning except running aot compiler */
LOG_WARNING("failed to check signature '%s' and resolve "

View File

@ -7,6 +7,7 @@
#include "bh_common.h"
#include "bh_assert.h"
#include "bh_log.h"
#include "wasm_native.h"
#include "wasm_runtime_common.h"
#include "wasm_memory.h"
#if WASM_ENABLE_INTERP != 0
@ -128,6 +129,12 @@ runtime_malloc(uint64 size, WASMModuleInstanceCommon *module_inst,
static JitCompOptions jit_options = { 0 };
#endif
#if WASM_ENABLE_JIT != 0
static LLVMJITOptions llvm_jit_options = { 3, 3 };
#endif
static RunningMode runtime_running_mode = Mode_Default;
#ifdef OS_ENABLE_HW_BOUND_CHECK
/* The exec_env of thread local storage, set before calling function
and used in signal handler, as we cannot get it from the argument
@ -514,6 +521,20 @@ wasm_runtime_destroy()
wasm_runtime_memory_destroy();
}
RunningMode
wasm_runtime_get_default_running_mode(void)
{
return runtime_running_mode;
}
#if WASM_ENABLE_JIT != 0
LLVMJITOptions
wasm_runtime_get_llvm_jit_options(void)
{
return llvm_jit_options;
}
#endif
bool
wasm_runtime_full_init(RuntimeInitArgs *init_args)
{
@ -521,10 +542,20 @@ wasm_runtime_full_init(RuntimeInitArgs *init_args)
&init_args->mem_alloc_option))
return false;
if (!wasm_runtime_set_default_running_mode(init_args->running_mode)) {
wasm_runtime_memory_destroy();
return false;
}
#if WASM_ENABLE_FAST_JIT != 0
jit_options.code_cache_size = init_args->fast_jit_code_cache_size;
#endif
#if WASM_ENABLE_JIT != 0
llvm_jit_options.size_level = init_args->llvm_jit_size_level;
llvm_jit_options.opt_level = init_args->llvm_jit_opt_level;
#endif
if (!wasm_runtime_env_init()) {
wasm_runtime_memory_destroy();
return false;
@ -554,6 +585,47 @@ wasm_runtime_full_init(RuntimeInitArgs *init_args)
return true;
}
bool
wasm_runtime_is_running_mode_supported(RunningMode running_mode)
{
if (running_mode == Mode_Default) {
return true;
}
else if (running_mode == Mode_Interp) {
#if WASM_ENABLE_INTERP != 0
return true;
#endif
}
else if (running_mode == Mode_Fast_JIT) {
#if WASM_ENABLE_FAST_JIT != 0
return true;
#endif
}
else if (running_mode == Mode_LLVM_JIT) {
#if WASM_ENABLE_JIT != 0
return true;
#endif
}
else if (running_mode == Mode_Multi_Tier_JIT) {
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
return true;
#endif
}
return false;
}
bool
wasm_runtime_set_default_running_mode(RunningMode running_mode)
{
if (wasm_runtime_is_running_mode_supported(running_mode)) {
runtime_running_mode = running_mode;
return true;
}
return false;
}
PackageType
get_package_type(const uint8 *buf, uint32 size)
{
@ -1171,6 +1243,41 @@ wasm_runtime_deinstantiate_internal(WASMModuleInstanceCommon *module_inst,
#endif
}
bool
wasm_runtime_set_running_mode(wasm_module_inst_t module_inst,
RunningMode running_mode)
{
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
return true;
#endif
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
WASMModuleInstance *module_inst_interp =
(WASMModuleInstance *)module_inst;
return wasm_set_running_mode(module_inst_interp, running_mode);
}
#endif
return false;
}
RunningMode
wasm_runtime_get_running_mode(wasm_module_inst_t module_inst)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
WASMModuleInstance *module_inst_interp =
(WASMModuleInstance *)module_inst;
return module_inst_interp->e->running_mode;
}
#endif
return Mode_Default;
}
void
wasm_runtime_deinstantiate(WASMModuleInstanceCommon *module_inst)
{
@ -1399,6 +1506,22 @@ wasm_runtime_dump_mem_consumption(WASMExecEnv *exec_env)
else
os_printf("Total aux stack used: no enough info to profile\n");
/*
* Report the native stack usage estimation.
*
* Unlike the aux stack above, we report the amount unused
* because we don't know the stack "bottom".
*
* Note that this is just about what the runtime itself observed.
* It doesn't cover host func implementations, signal handlers, etc.
*/
if (exec_env->native_stack_top_min != (void *)UINTPTR_MAX)
os_printf("Native stack left: %zd\n",
exec_env->native_stack_top_min
- exec_env->native_stack_boundary);
else
os_printf("Native stack left: no enough info to profile\n");
os_printf("Total app heap used: %u\n", app_heap_peak_size);
}
#endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \
@ -2195,6 +2318,12 @@ wasm_set_exception(WASMModuleInstance *module_inst, const char *exception)
if (exec_env) {
wasm_cluster_spread_exception(exec_env, exception ? false : true);
}
#if WASM_ENABLE_SHARED_MEMORY
if (exception) {
notify_stale_threads_on_exception(
(WASMModuleInstanceCommon *)module_inst);
}
#endif
#else
(void)exec_env;
#endif
@ -2219,9 +2348,7 @@ static const char *exception_msgs[] = {
"wasm auxiliary stack underflow", /* EXCE_AUX_STACK_UNDERFLOW */
"out of bounds table access", /* EXCE_OUT_OF_BOUNDS_TABLE_ACCESS */
"wasm operand stack overflow", /* EXCE_OPERAND_STACK_OVERFLOW */
#if WASM_ENABLE_FAST_JIT != 0
"failed to compile fast jit function", /* EXCE_FAILED_TO_COMPILE_FAST_JIT_FUNC */
#endif
"", /* EXCE_ALREADY_THROWN */
};
/* clang-format on */
@ -5205,3 +5332,24 @@ wasm_runtime_get_version(uint32_t *major, uint32_t *minor, uint32_t *patch)
*minor = WAMR_VERSION_MINOR;
*patch = WAMR_VERSION_PATCH;
}
bool
wasm_runtime_is_import_func_linked(const char *module_name,
const char *func_name)
{
return wasm_native_resolve_symbol(module_name, func_name, NULL, NULL, NULL,
NULL);
}
bool
wasm_runtime_is_import_global_linked(const char *module_name,
const char *global_name)
{
#if WASM_ENABLE_LIBC_BUILTIN != 0
WASMGlobalImport global = { 0 };
return wasm_native_lookup_libc_builtin_global(module_name, global_name,
&global);
#else
return false;
#endif
}

View File

@ -25,6 +25,9 @@
extern "C" {
#endif
/* Internal use for setting default running mode */
#define Mode_Default 0
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS != 0
#define PUT_I64_TO_ADDR(addr, value) \
@ -413,6 +416,13 @@ typedef struct wasm_frame_t {
const char *func_name_wp;
} WASMCApiFrame;
#ifdef WASM_ENABLE_JIT
typedef struct LLVMJITOptions {
uint32 opt_level;
uint32 size_level;
} LLVMJITOptions;
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
/* Signal info passing to interp/aot signal handler */
typedef struct WASMSignalInfo {
@ -437,10 +447,28 @@ wasm_runtime_get_exec_env_tls(void);
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_init(void);
/* Internal API */
RunningMode
wasm_runtime_get_default_running_mode(void);
#if WASM_ENABLE_JIT != 0
/* Internal API */
LLVMJITOptions
wasm_runtime_get_llvm_jit_options(void);
#endif
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_full_init(RuntimeInitArgs *init_args);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_running_mode_supported(RunningMode running_mode);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_set_default_running_mode(RunningMode running_mode);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy(void);
@ -484,6 +512,15 @@ wasm_runtime_instantiate(WASMModuleCommon *module, uint32 stack_size,
uint32 heap_size, char *error_buf,
uint32 error_buf_size);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_set_running_mode(wasm_module_inst_t module_inst,
RunningMode running_mode);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN RunningMode
wasm_runtime_get_running_mode(wasm_module_inst_t module_inst);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN void
wasm_runtime_deinstantiate(WASMModuleInstanceCommon *module_inst);
@ -956,6 +993,14 @@ void
wasm_runtime_destroy_custom_sections(WASMCustomSection *section_list);
#endif
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_import_func_linked(const char *module_name,
const char *func_name);
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_import_global_linked(const char *module_name,
const char *global_name);
#ifdef __cplusplus
}
#endif

View File

@ -30,6 +30,11 @@ typedef struct AtomicWaitNode {
korp_cond wait_cond;
} AtomicWaitNode;
typedef struct AtomicWaitAddressArgs {
uint32 index;
void **addr;
} AtomicWaitAddressArgs;
/* Atomic wait map */
static HashMap *wait_map;
@ -87,6 +92,61 @@ search_module(WASMModuleCommon *module)
return NULL;
}
static void
wait_map_address_count_callback(void *key, void *value,
void *p_total_elem_count)
{
*(uint32 *)p_total_elem_count = *(uint32 *)p_total_elem_count + 1;
}
static void
create_list_of_waiter_addresses(void *key, void *value, void *user_data)
{
AtomicWaitAddressArgs *data = (AtomicWaitAddressArgs *)user_data;
data->addr[data->index++] = key;
}
void
notify_stale_threads_on_exception(WASMModuleInstanceCommon *module_inst)
{
AtomicWaitAddressArgs args = { 0 };
uint32 i = 0, total_elem_count = 0;
uint64 total_elem_count_size = 0;
os_mutex_lock(&shared_memory_list_lock);
/* count number of addresses in wait_map */
bh_hash_map_traverse(wait_map, wait_map_address_count_callback,
(void *)&total_elem_count);
if (!total_elem_count) {
os_mutex_unlock(&shared_memory_list_lock);
return;
}
/* allocate memory */
total_elem_count_size = (uint64)sizeof(void *) * total_elem_count;
if (total_elem_count_size >= UINT32_MAX
|| !(args.addr = wasm_runtime_malloc((uint32)total_elem_count_size))) {
LOG_ERROR(
"failed to allocate memory for list of atomic wait addresses");
os_mutex_unlock(&shared_memory_list_lock);
return;
}
/* set values in list of addresses */
bh_hash_map_traverse(wait_map, create_list_of_waiter_addresses, &args);
os_mutex_unlock(&shared_memory_list_lock);
/* notify */
for (i = 0; i < args.index; i++) {
wasm_runtime_atomic_notify(module_inst, args.addr[i], UINT32_MAX);
}
/* free memory allocated to args data */
wasm_runtime_free(args.addr);
}
WASMSharedMemNode *
wasm_module_get_shared_memory(WASMModuleCommon *module)
{
@ -120,6 +180,7 @@ shared_memory_dec_reference(WASMModuleCommon *module)
bh_list_remove(shared_memory_list, node);
os_mutex_unlock(&shared_memory_list_lock);
os_mutex_destroy(&node->shared_mem_lock);
os_mutex_destroy(&node->lock);
wasm_runtime_free(node);
}
@ -148,7 +209,14 @@ shared_memory_set_memory_inst(WASMModuleCommon *module,
node->module = module;
node->memory_inst = memory;
node->ref_count = 1;
if (os_mutex_init(&node->shared_mem_lock) != 0) {
wasm_runtime_free(node);
return NULL;
}
if (os_mutex_init(&node->lock) != 0) {
os_mutex_destroy(&node->shared_mem_lock);
wasm_runtime_free(node);
return NULL;
}
@ -322,6 +390,10 @@ wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
bh_assert(module->module_type == Wasm_Module_Bytecode
|| module->module_type == Wasm_Module_AoT);
if (wasm_get_exception(module_inst)) {
return -1;
}
/* Currently we have only one memory instance */
if (!module_inst->memories[0]->is_shared) {
wasm_runtime_set_exception(module, "expected shared memory");

View File

@ -26,6 +26,8 @@ typedef struct WASMSharedMemNode {
WASMModuleCommon *module;
/* The memory information */
WASMMemoryInstanceCommon *memory_inst;
/* Lock used for atomic operations */
korp_mutex shared_mem_lock;
/* reference count */
uint32 ref_count;
@ -37,6 +39,9 @@ wasm_shared_memory_init();
void
wasm_shared_memory_destroy();
void
notify_stale_threads_on_exception(WASMModuleInstanceCommon *module);
WASMSharedMemNode *
wasm_module_get_shared_memory(WASMModuleCommon *module);

View File

@ -259,6 +259,7 @@ check_type_compatible(uint8 src_type, uint8 dst_type)
#define I32_SIX LLVM_CONST(i32_six)
#define I32_SEVEN LLVM_CONST(i32_seven)
#define I32_EIGHT LLVM_CONST(i32_eight)
#define I32_NINE LLVM_CONST(i32_nine)
#define I32_NEG_ONE LLVM_CONST(i32_neg_one)
#define I64_NEG_ONE LLVM_CONST(i64_neg_one)
#define I32_MIN LLVM_CONST(i32_min)

View File

@ -366,6 +366,87 @@ fail:
#endif /* end of (WASM_ENABLE_DUMP_CALL_STACK != 0) \
|| (WASM_ENABLE_PERF_PROFILING != 0) */
static bool
record_stack_usage(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 callee_cell_num)
{
LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
LLVMBasicBlockRef block_update;
LLVMBasicBlockRef block_after_update;
LLVMValueRef callee_local_size, new_sp, cmp;
LLVMValueRef native_stack_top_min;
LLVMTypeRef ptrdiff_type;
if (comp_ctx->pointer_size == sizeof(uint64_t)) {
ptrdiff_type = I64_TYPE;
}
else {
ptrdiff_type = I32_TYPE;
}
/*
* new_sp = last_alloca - callee_local_size;
* if (*native_stack_top_min_addr > new_sp) {
* *native_stack_top_min_addr = new_sp;
* }
*/
if (!(callee_local_size = LLVMConstInt(
ptrdiff_type, -(int64_t)callee_cell_num * 4, true))) {
aot_set_last_error("llvm build const failed.");
return false;
}
if (!(new_sp = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
func_ctx->last_alloca,
&callee_local_size, 1, "new_sp"))) {
aot_set_last_error("llvm build gep failed");
return false;
}
if (!(native_stack_top_min = LLVMBuildLoad2(
comp_ctx->builder, OPQ_PTR_TYPE,
func_ctx->native_stack_top_min_addr, "native_stack_top_min"))) {
aot_set_last_error("llvm build load failed");
return false;
}
if (!(cmp = LLVMBuildICmp(comp_ctx->builder, LLVMIntULT, new_sp,
native_stack_top_min, "cmp"))) {
aot_set_last_error("llvm build icmp failed.");
return false;
}
if (!(block_update = LLVMAppendBasicBlockInContext(
comp_ctx->context, func_ctx->func, "block_update"))) {
aot_set_last_error("llvm add basic block failed.");
return false;
}
if (!(block_after_update = LLVMAppendBasicBlockInContext(
comp_ctx->context, func_ctx->func, "block_after_update"))) {
aot_set_last_error("llvm add basic block failed.");
return false;
}
LLVMMoveBasicBlockAfter(block_update, block_curr);
LLVMMoveBasicBlockAfter(block_after_update, block_update);
if (!LLVMBuildCondBr(comp_ctx->builder, cmp, block_update,
block_after_update)) {
aot_set_last_error("llvm build cond br failed.");
return false;
}
LLVMPositionBuilderAtEnd(comp_ctx->builder, block_update);
if (!LLVMBuildStore(comp_ctx->builder, new_sp,
func_ctx->native_stack_top_min_addr)) {
aot_set_last_error("llvm build store failed");
return false;
}
if (!LLVMBuildBr(comp_ctx->builder, block_after_update)) {
aot_set_last_error("llvm build br failed.");
return false;
}
LLVMPositionBuilderAtEnd(comp_ctx->builder, block_after_update);
return true;
}
static bool
check_stack_boundary(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 callee_cell_num)
@ -409,6 +490,19 @@ check_stack_boundary(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
}
static bool
check_stack(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 callee_cell_num)
{
if (comp_ctx->enable_stack_estimation
&& !record_stack_usage(comp_ctx, func_ctx, callee_cell_num))
return false;
if (comp_ctx->enable_stack_bound_check
&& !check_stack_boundary(comp_ctx, func_ctx, callee_cell_num))
return false;
return true;
}
/**
* Check whether the app address and its buffer are inside the linear memory,
* if no, throw exception
@ -852,8 +946,7 @@ aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
callee_cell_num =
aot_func->param_cell_num + aot_func->local_cell_num + 1;
if (comp_ctx->enable_stack_bound_check
&& !check_stack_boundary(comp_ctx, func_ctx, callee_cell_num))
if (!check_stack(comp_ctx, func_ctx, callee_cell_num))
goto fail;
#if LLVM_VERSION_MAJOR >= 14
@ -1467,8 +1560,7 @@ aot_compile_op_call_indirect(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Translate call non-import block */
LLVMPositionBuilderAtEnd(comp_ctx->builder, block_call_non_import);
if (comp_ctx->enable_stack_bound_check
&& !check_stack_boundary(comp_ctx, func_ctx,
if (!check_stack(comp_ctx, func_ctx,
param_cell_num + ext_cell_num
+ 1
/* Reserve some local variables */

View File

@ -286,6 +286,21 @@ create_native_stack_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
return true;
}
static bool
create_native_stack_top_min(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef offset = I32_NINE;
if (!(func_ctx->native_stack_top_min_addr = LLVMBuildInBoundsGEP2(
comp_ctx->builder, OPQ_PTR_TYPE, func_ctx->exec_env, &offset, 1,
"native_stack_top_min_addr"))) {
aot_set_last_error("llvm build in bounds gep failed");
return false;
}
return true;
}
static bool
create_aux_stack_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
@ -434,7 +449,8 @@ create_local_variables(AOTCompData *comp_data, AOTCompContext *comp_ctx,
}
}
if (comp_ctx->enable_stack_bound_check) {
if (comp_ctx->enable_stack_bound_check
|| comp_ctx->enable_stack_estimation) {
if (aot_func_type->param_count + func->local_count > 0) {
func_ctx->last_alloca = func_ctx->locals[aot_func_type->param_count
+ func->local_count - 1];
@ -963,6 +979,10 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
&& !create_native_stack_bound(comp_ctx, func_ctx)) {
goto fail;
}
if (comp_ctx->enable_stack_estimation
&& !create_native_stack_top_min(comp_ctx, func_ctx)) {
goto fail;
}
/* Get auxiliary stack info */
if (wasm_func->has_op_set_global_aux_stack
@ -1622,6 +1642,9 @@ aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option)
if (option->disable_llvm_lto)
comp_ctx->disable_llvm_lto = true;
if (option->enable_stack_estimation)
comp_ctx->enable_stack_estimation = true;
comp_ctx->opt_level = option->opt_level;
comp_ctx->size_level = option->size_level;

View File

@ -163,6 +163,7 @@ typedef struct AOTFuncContext {
LLVMValueRef aot_inst;
LLVMValueRef argv_buf;
LLVMValueRef native_stack_bound;
LLVMValueRef native_stack_top_min_addr;
LLVMValueRef aux_stack_bound;
LLVMValueRef aux_stack_bottom;
LLVMValueRef native_symbol;
@ -313,6 +314,9 @@ typedef struct AOTCompContext {
/* Native stack bounday Check */
bool enable_stack_bound_check;
/* Native stack usage estimation */
bool enable_stack_estimation;
/* 128-bit SIMD */
bool enable_simd;
@ -403,6 +407,7 @@ typedef struct AOTCompOption {
bool enable_aux_stack_frame;
bool disable_llvm_intrinsics;
bool disable_llvm_lto;
bool enable_stack_estimation;
uint32 opt_level;
uint32 size_level;
uint32 output_format;

View File

@ -135,7 +135,8 @@ check_and_seek(JitCompContext *cc, JitReg addr, uint32 offset, uint32 bytes)
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* ---------- check ---------- */
/* 1. shortcut if the memory size is 0 */
if (0 == cc->cur_wasm_module->memories[mem_idx].init_page_count) {
if (cc->cur_wasm_module->memories != NULL
&& 0 == cc->cur_wasm_module->memories[mem_idx].init_page_count) {
JitReg module_inst, cur_page_count;
uint32 cur_page_count_offset =
(uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
@ -176,6 +177,18 @@ fail:
return 0;
}
#define CHECK_ALIGNMENT(maddr, memory_data, offset1) \
do { \
GEN_INSN(ADD, maddr, memory_data, offset1); \
JitReg align_mask = NEW_CONST(I64, ((uint64)1 << align) - 1); \
JitReg AND_res = jit_cc_new_reg_I64(cc); \
GEN_INSN(AND, AND_res, maddr, align_mask); \
GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I64, 0)); \
if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE, \
cc->cmp_reg, NULL)) \
goto fail; \
} while (0)
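For reference, a minimal host-side sketch of the check this macro emits as JIT IR (assumptions: `align` is the log2 of the access size, and an unaligned effective address raises EXCE_UNALIGNED_ATOMIC):
static bool
atomic_addr_is_aligned(const uint8 *memory_data, uint64 offset1, uint32 align)
{
    /* maddr = memory_data + offset1; aligned iff its low `align` bits are 0 */
    uint64 maddr = (uint64)(uintptr_t)memory_data + offset1;
    uint64 align_mask = ((uint64)1 << align) - 1;
    return (maddr & align_mask) == 0; /* false would trigger the trap */
}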
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
@ -779,6 +792,51 @@ bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
// Pop atomic.wait arguments
JitReg timeout, expect, expect_64, addr;
POP_I64(timeout);
if (op_type == VALUE_TYPE_I32) {
POP_I32(expect);
expect_64 = jit_cc_new_reg_I64(cc);
GEN_INSN(I32TOI64, expect_64, expect);
}
else {
POP_I64(expect_64);
}
POP_I32(addr);
// Get referenced address and store it in `maddr`
JitReg memory_data = get_memory_data_reg(cc->jit_frame, 0);
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_wait` arguments
JitReg res = jit_cc_new_reg_I32(cc);
JitReg args[5] = { 0 };
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = maddr;
args[2] = expect_64;
args[3] = timeout;
args[4] = NEW_CONST(I32, false);
if (!jit_emit_callnative(cc, wasm_runtime_atomic_wait, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
// Handle return code
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, -1));
if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg,
NULL))
goto fail;
PUSH_I32(res);
return true;
fail:
return false;
}
@ -786,6 +844,39 @@ bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes)
{
// Pop atomic.notify arguments
JitReg notify_count, addr;
POP_I32(notify_count);
POP_I32(addr);
// Get referenced address and store it in `maddr`
JitReg memory_data = get_memory_data_reg(cc->jit_frame, 0);
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_notify` arguments
JitReg res = jit_cc_new_reg_I32(cc);
JitReg args[3] = { 0 };
args[0] = get_module_inst_reg(cc->jit_frame);
args[1] = maddr;
args[2] = notify_count;
if (!jit_emit_callnative(cc, wasm_runtime_atomic_notify, res, args,
sizeof(args) / sizeof(args[0])))
goto fail;
// Handle return code
GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BLTS, cc->cmp_reg,
NULL))
goto fail;
PUSH_I32(res);
return true;
fail:
return false;
}
#endif

View File

@ -56,9 +56,31 @@ jit_code_cache_free(void *ptr)
bool
jit_pass_register_jitted_code(JitCompContext *cc)
{
uint32 jit_func_idx =
cc->cur_wasm_func_idx - cc->cur_wasm_module->import_function_count;
cc->cur_wasm_module->fast_jit_func_ptrs[jit_func_idx] =
cc->cur_wasm_func->fast_jit_jitted_code = cc->jitted_addr_begin;
WASMModuleInstance *instance;
WASMModule *module = cc->cur_wasm_module;
WASMFunction *func = cc->cur_wasm_func;
uint32 jit_func_idx = cc->cur_wasm_func_idx - module->import_function_count;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
os_mutex_lock(&module->instance_list_lock);
#endif
module->fast_jit_func_ptrs[jit_func_idx] = func->fast_jit_jitted_code =
cc->jitted_addr_begin;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
instance = module->instance_list;
while (instance) {
if (instance->e->running_mode == Mode_Fast_JIT)
instance->fast_jit_func_ptrs[jit_func_idx] = cc->jitted_addr_begin;
instance = instance->e->next;
}
os_mutex_unlock(&module->instance_list_lock);
#else
(void)instance;
#endif
return true;
}

View File

@ -157,8 +157,16 @@ jit_compiler_compile(WASMModule *module, uint32 func_idx)
/* Apply compiler passes */
if (!apply_compiler_passes(cc) || jit_get_last_error(cc)) {
last_error = jit_get_last_error(cc);
#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
char *function_name = cc->cur_wasm_func->field_name;
os_printf("fast jit compilation failed: %s (function_name=%s)\n",
last_error ? last_error : "unknown error", function_name);
#else
os_printf("fast jit compilation failed: %s\n",
last_error ? last_error : "unknown error");
#endif
goto fail;
}
@ -246,6 +254,8 @@ jit_compiler_set_call_to_fast_jit(WASMModule *module, uint32 func_idx)
func_ptr = jit_codegen_compile_call_to_fast_jit(module, func_idx);
if (func_ptr) {
uint32 i = func_idx - module->import_function_count;
module->functions[i]->call_to_fast_jit_from_llvm_jit = func_ptr;
jit_compiler_set_llvm_jit_func_ptr(module, func_idx, func_ptr);
}
@ -259,11 +269,13 @@ jit_compiler_set_llvm_jit_func_ptr(WASMModule *module, uint32 func_idx,
WASMModuleInstance *instance;
uint32 i = func_idx - module->import_function_count;
module->functions[i]->llvm_jit_func_ptr = module->func_ptrs[i] = func_ptr;
os_mutex_lock(&module->instance_list_lock);
module->func_ptrs[i] = func_ptr;
instance = module->instance_list;
while (instance) {
if (instance->e->running_mode == Mode_Multi_Tier_JIT)
instance->func_ptrs[func_idx] = func_ptr;
instance = instance->e->next;
}

View File

@ -55,6 +55,7 @@ typedef struct AOTCompOption {
bool enable_aux_stack_frame;
bool disable_llvm_intrinsics;
bool disable_llvm_lto;
bool enable_stack_estimation;
uint32_t opt_level;
uint32_t size_level;
uint32_t output_format;

View File

@ -354,6 +354,7 @@ WASM_API_EXTERN own wasm_importtype_t* wasm_importtype_new(
WASM_API_EXTERN const wasm_name_t* wasm_importtype_module(const wasm_importtype_t*);
WASM_API_EXTERN const wasm_name_t* wasm_importtype_name(const wasm_importtype_t*);
WASM_API_EXTERN const wasm_externtype_t* wasm_importtype_type(const wasm_importtype_t*);
WASM_API_EXTERN bool wasm_importtype_is_linked(const wasm_importtype_t*);
// Export Types
@ -797,6 +798,9 @@ static inline void* wasm_val_ptr(const wasm_val_t* val) {
#define KILOBYTE(n) ((n) * 1024)
// Create a placeholder extern to fill in `wasm_extern_vec_t *imports` for `wasm_instance_new()`
WASM_API_EXTERN wasm_extern_t *wasm_extern_new_empty(wasm_store_t *, wasm_externkind_t);
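A minimal usage sketch (the `store`, `module` and single-import layout are hypothetical): the placeholder stands in for an import the host does not provide, leaving it to be resolved by the runtime's registered natives.
/* Sketch: leave the only function import as a placeholder */
wasm_extern_t *externs[1] = { wasm_extern_new_empty(store, WASM_EXTERN_FUNC) };
wasm_extern_vec_t imports = { 0 };
wasm_extern_vec_new(&imports, 1, externs);
wasm_trap_t *trap = NULL;
wasm_instance_t *instance = wasm_instance_new(store, module, &imports, &trap);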
///////////////////////////////////////////////////////////////////////////////
#undef own

View File

@ -131,6 +131,14 @@ typedef struct mem_alloc_info_t {
uint32_t highmark_size;
} mem_alloc_info_t;
/* Running mode of runtime and module instance*/
typedef enum RunningMode {
Mode_Interp = 1,
Mode_Fast_JIT,
Mode_LLVM_JIT,
Mode_Multi_Tier_JIT,
} RunningMode;
/* WASM runtime initialize arguments */
typedef struct RuntimeInitArgs {
mem_alloc_type_t mem_alloc_type;
@ -152,6 +160,13 @@ typedef struct RuntimeInitArgs {
/* Fast JIT code cache size */
uint32_t fast_jit_code_cache_size;
/* Default running mode of the runtime */
RunningMode running_mode;
/* LLVM JIT opt and size level */
uint32_t llvm_jit_opt_level;
uint32_t llvm_jit_size_level;
} RuntimeInitArgs;
#ifndef WASM_VALKIND_T_DEFINED
@ -195,9 +210,9 @@ WASM_RUNTIME_API_EXTERN bool
wasm_runtime_init(void);
/**
* Initialize the WASM runtime environment, and also initialize
* the memory allocator and register native symbols, which are specified
* with init arguments
* Initialize the WASM runtime environment, WASM running mode,
* and also initialize the memory allocator and register native symbols,
* which are specified with init arguments
*
* @param init_args specifies the init arguments
*
@ -206,6 +221,28 @@ wasm_runtime_init(void);
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_full_init(RuntimeInitArgs *init_args);
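A minimal initialization sketch under these declarations (the chosen values are illustrative, not recommendations; the LLVM JIT options only matter when LLVM JIT is compiled in):
RuntimeInitArgs init_args;
memset(&init_args, 0, sizeof(RuntimeInitArgs));
init_args.mem_alloc_type = Alloc_With_System_Allocator;
init_args.running_mode = Mode_Interp; /* default mode inherited by new instances */
init_args.llvm_jit_opt_level = 3;
init_args.llvm_jit_size_level = 3;
if (!wasm_runtime_full_init(&init_args)) {
    /* handle initialization failure */
}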
/**
* Query whether a certain running mode is supported for the runtime
*
* @param running_mode the running mode to query
*
* @return true if this running mode is supported, false otherwise
*/
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_running_mode_supported(RunningMode running_mode);
/**
* Set the default running mode of the runtime. A module instance inherits
* this mode when it is instantiated; the mode of an individual instance
* can later be changed by calling wasm_runtime_set_running_mode
*
* @param running_mode the running mode to set
*
* @return true if success, false otherwise
*/
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_set_default_running_mode(RunningMode running_mode);
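A short sketch combining the two calls above (the mode choice is illustrative):
/* Switch the runtime default to Fast JIT only when it is built in */
if (wasm_runtime_is_running_mode_supported(Mode_Fast_JIT))
    wasm_runtime_set_default_running_mode(Mode_Fast_JIT);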
/**
* Destroy the WASM runtime environment.
*/
@ -450,6 +487,34 @@ wasm_runtime_instantiate(const wasm_module_t module,
uint32_t stack_size, uint32_t heap_size,
char *error_buf, uint32_t error_buf_size);
/**
* Set the running mode of a WASM module instance, overriding the
* default running mode of the runtime. Note that it only makes sense when
* the input is a wasm bytecode file: an AOT file is always run with the
* AOT engine, and in that case this function always returns true.
*
* @param module_inst the WASM module instance to set running mode
* @param running_mode the running mode to set
*
* @return true if success, false otherwise
*/
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_set_running_mode(wasm_module_inst_t module_inst,
RunningMode running_mode);
/**
* Get the running mode of a WASM module instance. If no running mode has
* been explicitly set, the default running mode of the runtime is used
* and returned. Note that it only makes sense when the input is a wasm
* bytecode file: for an AOT file, this function always returns 0.
*
* @param module_inst the WASM module instance to query for running mode
*
* @return the running mode this module instance currently uses
*/
WASM_RUNTIME_API_EXTERN RunningMode
wasm_runtime_get_running_mode(wasm_module_inst_t module_inst);
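A per-instance sketch (assumes `module_inst` was instantiated from a wasm bytecode module):
/* Override the running mode of one instance, then read it back */
if (wasm_runtime_set_running_mode(module_inst, Mode_Interp)) {
    RunningMode mode = wasm_runtime_get_running_mode(module_inst);
    (void)mode; /* Mode_Interp for a bytecode instance */
}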
/**
* Deinstantiate a WASM module instance, destroy the resources.
*
@ -1259,6 +1324,22 @@ wasm_runtime_get_custom_section(wasm_module_t const module_comm,
*/
WASM_RUNTIME_API_EXTERN void
wasm_runtime_get_version(uint32_t *major, uint32_t *minor, uint32_t *patch);
/**
* Check whether an import function `(import <module_name> <func_name> (func ...))`
* is linked to a runtime-registered native function
*/
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_import_func_linked(const char *module_name,
const char *func_name);
/**
* Check whether an import global `(import <module_name> <global_name> (global ...))`
* is linked to a runtime-registered native global
*/
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_import_global_linked(const char *module_name,
const char *global_name);
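A lookup sketch (the module and field names are hypothetical examples):
bool func_linked = wasm_runtime_is_import_func_linked("env", "host_log");
bool global_linked = wasm_runtime_is_import_global_linked("env", "host_flag");
if (!func_linked || !global_linked) {
    /* register the natives or provide placeholder externs before instantiating */
}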
/* clang-format on */
#ifdef __cplusplus

View File

@ -278,9 +278,14 @@ struct WASMFunction {
#endif
#if WASM_ENABLE_FAST_JIT != 0
/* The compiled fast jit jitted code block of this function */
void *fast_jit_jitted_code;
#if WASM_ENABLE_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
/* The compiled llvm jit func ptr of this function */
void *llvm_jit_func_ptr;
/* Code block to call fast jit jitted code of this function
from the llvm jit jitted code */
void *call_to_fast_jit_from_llvm_jit;
#endif
#endif
};
@ -506,14 +511,13 @@ struct WASMModule {
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
/**
* List of instances referred to this module. When source debugging
* feature is enabled, the debugger may modify the code section of
* the module, so we need to report a warning if user create several
* instances based on the same module. Sub instances created by
* lib-pthread or spawn API won't be added into the list.
* instances based on the same module.
*
* Also add the instance to the list for Fast JIT to LLVM JIT
* tier-up, since we need to lazily update the LLVM func pointers
@ -533,7 +537,22 @@ struct WASMModule {
#endif
#if WASM_ENABLE_FAST_JIT != 0
/* func pointers of Fast JITed (un-imported) functions */
/**
* func pointers of Fast JITed (un-imported) functions
* for non Multi-Tier JIT mode:
* (1) when lazy jit is disabled, each pointer is set to the compiled
* fast jit jitted code
* (2) when lazy jit is enabled, each pointer is firstly inited as
* jit_global->compile_fast_jit_and_then_call, and then set to the
* compiled fast jit jitted code when it is called (the stub will
* compile the jit function and then update itself)
* for Multi-Tier JIT mode:
* each pointer is firstly inited as compile_fast_jit_and_then_call,
* and then set to the compiled fast jit jitted code when it is called,
* and when the llvm jit func ptr of the same function is compiled, it
* will be set to call_to_llvm_jit_from_fast_jit of this function type
* (tier-up from fast-jit to llvm-jit)
*/
void **fast_jit_func_ptrs;
/* locks for Fast JIT lazy compilation */
korp_mutex fast_jit_thread_locks[WASM_ORC_JIT_BACKEND_THREAD_NUM];
@ -543,7 +562,16 @@ struct WASMModule {
#if WASM_ENABLE_JIT != 0
struct AOTCompData *comp_data;
struct AOTCompContext *comp_ctx;
/* func pointers of LLVM JITed (un-imported) functions */
/**
* func pointers of LLVM JITed (un-imported) functions
* for non Multi-Tier JIT mode:
* each pointer is set to the looked-up llvm jit func ptr; note that it
* is a stub and will trigger the actual compilation when it is called
* for Multi-Tier JIT mode:
* each pointer is inited as call_to_fast_jit code block, when the llvm
* jit func ptr is actually compiled, it is set to the compiled llvm jit
* func ptr
*/
void **func_ptrs;
/* whether the func pointers are compiled */
bool *func_ptrs_compiled;
@ -568,6 +596,12 @@ struct WASMModule {
korp_tid llvm_jit_init_thread;
/* whether the llvm jit is initialized */
bool llvm_jit_inited;
/* Whether to enable llvm jit compilation:
it is set to true only when a module instance starts to run
with running mode Mode_LLVM_JIT or Mode_Multi_Tier_JIT, since
there is no need to enable llvm jit compilation for Mode_Interp
and Mode_Fast_JIT, which improves performance for them */
bool enable_llvm_jit_compilation;
#endif
};

View File

@ -696,28 +696,28 @@ trunc_f64_to_int(WASMModuleInstance *module, uint32 *frame_sp, float64 src_min,
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint32)(*(uint8 *)maddr); \
*(uint8 *)maddr = (uint8)(readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I32_##OP_NAME##16_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint32)LOAD_U16(maddr); \
STORE_U16(maddr, (uint16)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = LOAD_I32(maddr); \
STORE_U32(maddr, readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
PUSH_I32(readv); \
break; \
@ -736,39 +736,39 @@ trunc_f64_to_int(WASMModuleInstance *module, uint32 *frame_sp, float64 src_min,
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)(*(uint8 *)maddr); \
*(uint8 *)maddr = (uint8)(readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I64_##OP_NAME##16_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_U16(maddr); \
STORE_U16(maddr, (uint16)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I64_##OP_NAME##32_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_U32(maddr); \
STORE_U32(maddr, (uint32)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else { \
uint64 op_result; \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_I64(maddr); \
op_result = readv op sval; \
STORE_I64(maddr, op_result); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
PUSH_I64(readv); \
break; \
@ -1151,6 +1151,11 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
uint32 cache_index, type_index, param_cell_num, cell_num;
uint8 value_type;
#if WASM_ENABLE_SHARED_MEMORY != 0
WASMSharedMemNode *node =
wasm_module_get_shared_memory((WASMModuleCommon *)module->module);
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
uint8 *frame_ip_orig = NULL;
WASMDebugInstance *debug_instance = wasm_exec_env_get_instance(exec_env);
@ -3458,23 +3463,23 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I32_LOAD8_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)(*(uint8 *)maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I32_LOAD16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)LOAD_U16(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I32(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I32(readv);
@ -3493,30 +3498,30 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I64_LOAD8_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)(*(uint8 *)maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_LOAD16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U16(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_LOAD32_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U32(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I64(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
@ -3535,23 +3540,23 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I32_STORE8) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
*(uint8 *)maddr = (uint8)sval;
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I32_STORE16) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U16(maddr, (uint16)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U32(maddr, frame_sp[1]);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
}
@ -3569,31 +3574,31 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I64_STORE8) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
*(uint8 *)maddr = (uint8)sval;
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_STORE16) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U16(maddr, (uint16)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_STORE32) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U32(maddr, (uint32)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
PUT_I64_TO_ADDR((uint32 *)maddr,
GET_I64_FROM_ADDR(frame_sp + 1));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
}
@ -3613,32 +3618,32 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_ATOMIC_MEMORY_ACCESS();
expect = (uint8)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)(*(uint8 *)maddr);
if (readv == expect)
*(uint8 *)maddr = (uint8)(sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
expect = (uint16)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)LOAD_U16(maddr);
if (readv == expect)
STORE_U16(maddr, (uint16)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I32(maddr);
if (readv == expect)
STORE_U32(maddr, sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I32(readv);
break;
@ -3659,44 +3664,44 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_ATOMIC_MEMORY_ACCESS();
expect = (uint8)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)(*(uint8 *)maddr);
if (readv == expect)
*(uint8 *)maddr = (uint8)(sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
expect = (uint16)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U16(maddr);
if (readv == expect)
STORE_U16(maddr, (uint16)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
expect = (uint32)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U32(maddr);
if (readv == expect)
STORE_U32(maddr, (uint32)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_I64(maddr);
if (readv == expect) {
STORE_I64(maddr, sval);
}
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
break;
@ -4150,6 +4155,7 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
}
argc = function->param_cell_num;
RECORD_STACK_USAGE(exec_env, (uint8 *)&prev_frame);
#if !(defined(OS_ENABLE_HW_BOUND_CHECK) \
&& WASM_DISABLE_STACK_HW_BOUND_CHECK == 0)
if ((uint8 *)&prev_frame < exec_env->native_stack_boundary) {
@ -4194,58 +4200,51 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
}
}
else {
#if WASM_ENABLE_LAZY_JIT != 0
RunningMode running_mode =
wasm_runtime_get_running_mode((wasm_module_inst_t)module_inst);
/* Fast JIT to LLVM JIT tier-up is enabled */
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0
/* Fast JIT and LLVM JIT are both enabled, call llvm jit function
if (running_mode == Mode_Interp) {
wasm_interp_call_func_bytecode(module_inst, exec_env, function,
frame);
}
#if WASM_ENABLE_FAST_JIT != 0
else if (running_mode == Mode_Fast_JIT) {
fast_jit_call_func_bytecode(module_inst, exec_env, function, frame);
}
#endif
#if WASM_ENABLE_JIT != 0
else if (running_mode == Mode_LLVM_JIT) {
llvm_jit_call_func_bytecode(module_inst, exec_env, function, argc,
argv);
/* For llvm jit, the results have been stored in argv,
no need to copy them from stack frame again */
copy_argv_from_frame = false;
}
#endif
#if WASM_ENABLE_LAZY_JIT != 0 && WASM_ENABLE_FAST_JIT != 0 \
&& WASM_ENABLE_JIT != 0
else if (running_mode == Mode_Multi_Tier_JIT) {
/* Tier-up from Fast JIT to LLVM JIT, call llvm jit function
if it is compiled, else call fast jit function */
uint32 func_idx = (uint32)(function - module_inst->e->functions);
if (module_inst->module->func_ptrs_compiled
[func_idx - module_inst->module->import_function_count]) {
llvm_jit_call_func_bytecode(module_inst, exec_env, function, argc,
argv);
llvm_jit_call_func_bytecode(module_inst, exec_env, function,
argc, argv);
/* For llvm jit, the results have been stored in argv,
no need to copy them from stack frame again */
copy_argv_from_frame = false;
}
else {
fast_jit_call_func_bytecode(module_inst, exec_env, function, frame);
fast_jit_call_func_bytecode(module_inst, exec_env, function,
frame);
}
}
#elif WASM_ENABLE_JIT != 0
/* Only LLVM JIT is enabled */
llvm_jit_call_func_bytecode(module_inst, exec_env, function, argc,
argv);
/* For llvm jit, the results have been stored in argv,
no need to copy them from stack frame again */
copy_argv_from_frame = false;
#elif WASM_ENABLE_FAST_JIT != 0
/* Only Fast JIT is enabled */
fast_jit_call_func_bytecode(module_inst, exec_env, function, frame);
#else
/* Both Fast JIT and LLVM JIT are disabled */
wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
#endif
#else /* else of WASM_ENABLE_LAZY_JIT != 0 */
/* Fast JIT to LLVM JIT tier-up is enabled */
#if WASM_ENABLE_JIT != 0
/* LLVM JIT is enabled */
llvm_jit_call_func_bytecode(module_inst, exec_env, function, argc,
argv);
/* For llvm jit, the results have been stored in argv,
no need to copy them from stack frame again */
copy_argv_from_frame = false;
#elif WASM_ENABLE_FAST_JIT != 0
/* Fast JIT is enabled */
fast_jit_call_func_bytecode(module_inst, exec_env, function, frame);
#else
/* Both Fast JIT and LLVM JIT are disabled */
wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
#endif
#endif /* end of WASM_ENABLE_LAZY_JIT != 0 */
else {
/* There should always be a supported running mode selected */
bh_assert(0);
}
(void)wasm_interp_call_func_bytecode;
#if WASM_ENABLE_FAST_JIT != 0

View File

@ -469,28 +469,28 @@ LOAD_PTR(void *addr)
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(1); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint32)(*(uint8 *)maddr); \
*(uint8 *)maddr = (uint8)(readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I32_##OP_NAME##16_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(2); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint32)LOAD_U16(maddr); \
STORE_U16(maddr, (uint16)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(4); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = LOAD_I32(maddr); \
STORE_U32(maddr, readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
PUSH_I32(readv); \
break; \
@ -509,39 +509,39 @@ LOAD_PTR(void *addr)
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(1); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)(*(uint8 *)maddr); \
*(uint8 *)maddr = (uint8)(readv op sval); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I64_##OP_NAME##16_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(2); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_U16(maddr); \
STORE_U16(maddr, (uint16)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else if (opcode == WASM_OP_ATOMIC_RMW_I64_##OP_NAME##32_U) { \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(4); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_U32(maddr); \
STORE_U32(maddr, (uint32)(readv op sval)); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
else { \
uint64 op_result; \
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr); \
CHECK_ATOMIC_MEMORY_ACCESS(8); \
\
os_mutex_lock(&module->e->mem_lock); \
os_mutex_lock(&node->shared_mem_lock); \
readv = (uint64)LOAD_I64(maddr); \
op_result = readv op sval; \
STORE_I64(maddr, op_result); \
os_mutex_unlock(&module->e->mem_lock); \
os_mutex_unlock(&node->shared_mem_lock); \
} \
PUSH_I64(readv); \
break; \
@ -1183,6 +1183,11 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
uint32 local_idx, local_offset, global_idx;
uint8 opcode, local_type, *global_addr;
#if WASM_ENABLE_SHARED_MEMORY != 0
WASMSharedMemNode *node =
wasm_module_get_shared_memory((WASMModuleCommon *)module->module);
#endif
#if WASM_ENABLE_LABELS_AS_VALUES != 0
#define HANDLE_OPCODE(op) &&HANDLE_##op
DEFINE_GOTO_TABLE(const void *, handle_table);
@ -3296,23 +3301,23 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I32_LOAD8_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(1);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)(*(uint8 *)maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I32_LOAD16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)LOAD_U16(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I32(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I32(readv);
@ -3331,30 +3336,30 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I64_LOAD8_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(1);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)(*(uint8 *)maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_LOAD16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U16(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_LOAD32_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U32(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(8);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I64(maddr);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
@ -3372,23 +3377,23 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I32_STORE8) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(1);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
*(uint8 *)maddr = (uint8)sval;
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I32_STORE16) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U16(maddr, (uint16)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U32(maddr, sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
}
@ -3406,30 +3411,30 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if (opcode == WASM_OP_ATOMIC_I64_STORE8) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 1, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(1);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
*(uint8 *)maddr = (uint8)sval;
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_STORE16) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U16(maddr, (uint16)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_I64_STORE32) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_U32(maddr, (uint32)sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(8);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
STORE_I64(maddr, sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
}
@ -3449,32 +3454,32 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_ATOMIC_MEMORY_ACCESS(1);
expect = (uint8)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)(*(uint8 *)maddr);
if (readv == expect)
*(uint8 *)maddr = (uint8)(sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
expect = (uint16)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint32)LOAD_U16(maddr);
if (readv == expect)
STORE_U16(maddr, (uint16)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = LOAD_I32(maddr);
if (readv == expect)
STORE_U32(maddr, sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I32(readv);
break;
@ -3495,44 +3500,44 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_ATOMIC_MEMORY_ACCESS(1);
expect = (uint8)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)(*(uint8 *)maddr);
if (readv == expect)
*(uint8 *)maddr = (uint8)(sval);
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 2, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(2);
expect = (uint16)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U16(maddr);
if (readv == expect)
STORE_U16(maddr, (uint16)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else if (opcode == WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U) {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(4);
expect = (uint32)expect;
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_U32(maddr);
if (readv == expect)
STORE_U32(maddr, (uint32)(sval));
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
else {
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS(8);
os_mutex_lock(&module->e->mem_lock);
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_I64(maddr);
if (readv == expect) {
STORE_I64(maddr, sval);
}
os_mutex_unlock(&module->e->mem_lock);
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
break;
@ -3901,6 +3906,7 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
}
argc = function->param_cell_num;
RECORD_STACK_USAGE(exec_env, (uint8 *)&prev_frame);
#if !(defined(OS_ENABLE_HW_BOUND_CHECK) \
&& WASM_DISABLE_STACK_HW_BOUND_CHECK == 0)
if ((uint8 *)&prev_frame < exec_env->native_stack_boundary) {

View File

@ -1399,6 +1399,7 @@ load_global_import(const uint8 **p_buf, const uint8 *buf_end,
WASMModule *sub_module = NULL;
WASMGlobal *linked_global = NULL;
#endif
bool ret = false;
CHECK_BUF(p, p_end, 2);
declare_type = read_uint8(p);
@ -1411,15 +1412,16 @@ load_global_import(const uint8 **p_buf, const uint8 *buf_end,
}
#if WASM_ENABLE_LIBC_BUILTIN != 0
global->is_linked = wasm_native_lookup_libc_builtin_global(
sub_module_name, global_name, global);
if (global->is_linked) {
ret = wasm_native_lookup_libc_builtin_global(sub_module_name, global_name,
global);
if (ret) {
if (global->type != declare_type
|| global->is_mutable != declare_mutable) {
set_error_buf(error_buf, error_buf_size,
"incompatible import type");
return false;
}
global->is_linked = true;
}
#endif
#if WASM_ENABLE_MULTI_MODULE != 0
@ -1449,6 +1451,7 @@ load_global_import(const uint8 **p_buf, const uint8 *buf_end,
global->is_mutable = (declare_mutable == 1);
(void)parent_module;
(void)ret;
return true;
fail:
return false;
@ -2989,6 +2992,7 @@ static bool
init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
uint32 error_buf_size)
{
LLVMJITOptions llvm_jit_options = wasm_runtime_get_llvm_jit_options();
AOTCompOption option = { 0 };
char *aot_last_error;
uint64 size;
@ -3027,8 +3031,11 @@ init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
}
option.is_jit_mode = true;
option.opt_level = 3;
option.size_level = 3;
llvm_jit_options = wasm_runtime_get_llvm_jit_options();
option.opt_level = llvm_jit_options.opt_level;
option.size_level = llvm_jit_options.size_level;
#if WASM_ENABLE_BULK_MEMORY != 0
option.enable_bulk_memory = true;
#endif
@ -3048,6 +3055,9 @@ init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
#if (WASM_ENABLE_PERF_PROFILING != 0) || (WASM_ENABLE_DUMP_CALL_STACK != 0)
option.enable_aux_stack_frame = true;
#endif
#if WASM_ENABLE_MEMORY_PROFILING != 0
option.enable_stack_estimation = true;
#endif
module->comp_ctx = aot_create_comp_context(module->comp_data, &option);
if (!module->comp_ctx) {
@ -3109,6 +3119,8 @@ init_llvm_jit_functions_stage2(WASMModule *module, char *error_buf,
module->func_ptrs[i] = (void *)func_addr;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
module->functions[i]->llvm_jit_func_ptr = (void *)func_addr;
if (module->orcjit_stop_compiling)
return false;
#endif
@ -3199,9 +3211,9 @@ orcjit_thread_callback(void *arg)
/* Wait until init_llvm_jit_functions_stage2 finishes */
os_mutex_lock(&module->tierup_wait_lock);
while (!module->llvm_jit_inited) {
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation)) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10);
&module->tierup_wait_lock, 10000);
if (module->orcjit_stop_compiling) {
/* init_llvm_jit_functions_stage2 failed */
os_mutex_unlock(&module->tierup_wait_lock);
@ -3853,7 +3865,7 @@ create_module(char *error_buf, uint32 error_buf_size)
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
if (os_mutex_init(&module->instance_list_lock) != 0) {
set_error_buf(error_buf, error_buf_size,
@ -4148,10 +4160,8 @@ check_wasi_abi_compatibility(const WASMModule *module,
/* should have one at least */
if (module->import_wasi_api && !start && !initialize) {
set_error_buf(
error_buf, error_buf_size,
"a module with WASI apis must be either a command or a reactor");
return false;
LOG_WARNING("warning: a module with WASI apis should be either "
"a command or a reactor");
}
/*
@ -4256,7 +4266,8 @@ wasm_loader_unload(WASMModule *module)
if (!module)
return;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
module->orcjit_stop_compiling = true;
if (module->llvm_jit_init_thread)
os_thread_join(module->llvm_jit_init_thread, NULL);
@ -4277,7 +4288,8 @@ wasm_loader_unload(WASMModule *module)
aot_destroy_comp_data(module->comp_data);
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
if (module->tierup_wait_lock_inited) {
os_mutex_destroy(&module->tierup_wait_lock);
os_cond_destroy(&module->tierup_wait_cond);
@ -4312,9 +4324,9 @@ wasm_loader_unload(WASMModule *module)
module->functions[i]->fast_jit_jitted_code);
}
#if WASM_ENABLE_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
if (module->functions[i]->llvm_jit_func_ptr) {
if (module->functions[i]->call_to_fast_jit_from_llvm_jit) {
jit_code_cache_free(
module->functions[i]->llvm_jit_func_ptr);
module->functions[i]->call_to_fast_jit_from_llvm_jit);
}
#endif
#endif
@ -4407,7 +4419,7 @@ wasm_loader_unload(WASMModule *module)
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
os_mutex_destroy(&module->instance_list_lock);
#endif

View File

@ -1835,6 +1835,7 @@ static bool
init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
uint32 error_buf_size)
{
LLVMJITOptions llvm_jit_options = wasm_runtime_get_llvm_jit_options();
AOTCompOption option = { 0 };
char *aot_last_error;
uint64 size;
@ -1873,8 +1874,9 @@ init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
}
option.is_jit_mode = true;
option.opt_level = 3;
option.size_level = 3;
option.opt_level = llvm_jit_options.opt_level;
option.size_level = llvm_jit_options.size_level;
#if WASM_ENABLE_BULK_MEMORY != 0
option.enable_bulk_memory = true;
#endif
@ -1894,6 +1896,9 @@ init_llvm_jit_functions_stage1(WASMModule *module, char *error_buf,
#if (WASM_ENABLE_PERF_PROFILING != 0) || (WASM_ENABLE_DUMP_CALL_STACK != 0)
option.enable_aux_stack_frame = true;
#endif
#if WASM_ENABLE_MEMORY_PROFILING != 0
option.enable_stack_estimation = true;
#endif
module->comp_ctx = aot_create_comp_context(module->comp_data, &option);
if (!module->comp_ctx) {
@ -1957,6 +1962,8 @@ init_llvm_jit_functions_stage2(WASMModule *module, char *error_buf,
module->func_ptrs[i] = (void *)func_addr;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
module->functions[i]->llvm_jit_func_ptr = (void *)func_addr;
if (module->orcjit_stop_compiling)
return false;
#endif
@ -2047,9 +2054,9 @@ orcjit_thread_callback(void *arg)
/* Wait until init_llvm_jit_functions_stage2 finishes */
os_mutex_lock(&module->tierup_wait_lock);
while (!module->llvm_jit_inited) {
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation)) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10);
&module->tierup_wait_lock, 10000);
if (module->orcjit_stop_compiling) {
/* init_llvm_jit_functions_stage2 failed */
os_mutex_unlock(&module->tierup_wait_lock);
@ -2718,7 +2725,8 @@ create_module(char *error_buf, uint32 error_buf_size)
bh_assert(ret == BH_LIST_SUCCESS);
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
if (os_mutex_init(&module->instance_list_lock) != 0) {
set_error_buf(error_buf, error_buf_size,
"init instance list lock failed");
@ -2939,7 +2947,8 @@ wasm_loader_unload(WASMModule *module)
if (!module)
return;
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
module->orcjit_stop_compiling = true;
if (module->llvm_jit_init_thread)
os_thread_join(module->llvm_jit_init_thread, NULL);
@ -2960,7 +2969,8 @@ wasm_loader_unload(WASMModule *module)
aot_destroy_comp_data(module->comp_data);
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
if (module->tierup_wait_lock_inited) {
os_mutex_destroy(&module->tierup_wait_lock);
os_cond_destroy(&module->tierup_wait_cond);
@ -2995,9 +3005,9 @@ wasm_loader_unload(WASMModule *module)
module->functions[i]->fast_jit_jitted_code);
}
#if WASM_ENABLE_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
if (module->functions[i]->llvm_jit_func_ptr) {
if (module->functions[i]->call_to_fast_jit_from_llvm_jit) {
jit_code_cache_free(
module->functions[i]->llvm_jit_func_ptr);
module->functions[i]->call_to_fast_jit_from_llvm_jit);
}
#endif
#endif
@ -3056,7 +3066,8 @@ wasm_loader_unload(WASMModule *module)
}
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT && WASM_ENABLE_LAZY_JIT != 0
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
os_mutex_destroy(&module->instance_list_lock);
#endif

View File

@ -737,13 +737,12 @@ functions_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
function++;
}
bh_assert((uint32)(function - functions) == function_count);
#if WASM_ENABLE_FAST_JIT != 0
module_inst->fast_jit_func_ptrs = module->fast_jit_func_ptrs;
#endif
bh_assert((uint32)(function - functions) == function_count);
(void)module_inst;
return functions;
}
@ -1288,9 +1287,8 @@ init_func_ptrs(WASMModuleInstance *module_inst, WASMModule *module,
*func_ptrs = import_func->func_ptr_linked;
}
/* Set defined function pointers */
bh_memcpy_s(func_ptrs, sizeof(void *) * module->function_count,
module->func_ptrs, sizeof(void *) * module->function_count);
/* The defined function pointers will be set in
wasm_runtime_set_running_mode, no need to set them here */
return true;
}
#endif /* end of WASM_ENABLE_JIT != 0 */
@ -1336,6 +1334,173 @@ init_func_type_indexes(WASMModuleInstance *module_inst, char *error_buf,
}
#endif /* end of WASM_ENABLE_FAST_JIT != 0 || WASM_ENABLE_JIT != 0 */
static bool
set_running_mode(WASMModuleInstance *module_inst, RunningMode running_mode,
bool first_time_set)
{
WASMModule *module = module_inst->module;
if (running_mode == Mode_Default) {
#if WASM_ENABLE_FAST_JIT == 0 && WASM_ENABLE_JIT == 0
running_mode = Mode_Interp;
#elif WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT == 0
running_mode = Mode_Fast_JIT;
#elif WASM_ENABLE_FAST_JIT == 0 && WASM_ENABLE_JIT != 0
running_mode = Mode_LLVM_JIT;
#else /* WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 */
#if WASM_ENABLE_LAZY_JIT == 0
running_mode = Mode_LLVM_JIT;
#else
running_mode = Mode_Multi_Tier_JIT;
#endif
#endif
}
if (!wasm_runtime_is_running_mode_supported(running_mode))
return false;
#if !(WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0) /* No possible multi-tier JIT */
module_inst->e->running_mode = running_mode;
if (running_mode == Mode_Interp) {
/* Do nothing for Mode_Interp */
}
else if (running_mode == Mode_Fast_JIT) {
/* Do nothing for Mode_Fast_JIT since
module_inst->fast_jit_func_ptrs is same as
module->fast_jit_func_ptrs */
}
#if WASM_ENABLE_JIT != 0
else if (running_mode == Mode_LLVM_JIT) {
/* Set defined function pointers */
bh_memcpy_s(module_inst->func_ptrs + module->import_function_count,
sizeof(void *) * module->function_count, module->func_ptrs,
sizeof(void *) * module->function_count);
}
#endif
else {
bh_assert(0);
}
#else /* Possible multi-tier JIT */
os_mutex_lock(&module->instance_list_lock);
module_inst->e->running_mode = running_mode;
if (running_mode == Mode_Interp) {
/* Do nothing for Mode_Interp */
}
#if WASM_ENABLE_FAST_JIT != 0
else if (running_mode == Mode_Fast_JIT) {
JitGlobals *jit_globals = jit_compiler_get_jit_globals();
uint32 i;
/* Allocate memory for fast_jit_func_ptrs if needed */
if (!module_inst->fast_jit_func_ptrs
|| module_inst->fast_jit_func_ptrs == module->fast_jit_func_ptrs) {
uint64 total_size = (uint64)sizeof(void *) * module->function_count;
if (!(module_inst->fast_jit_func_ptrs =
runtime_malloc(total_size, NULL, 0))) {
os_mutex_unlock(&module->instance_list_lock);
return false;
}
}
for (i = 0; i < module->function_count; i++) {
if (module->functions[i]->fast_jit_jitted_code) {
/* current fast jit function has been compiled */
module_inst->fast_jit_func_ptrs[i] =
module->functions[i]->fast_jit_jitted_code;
}
else {
module_inst->fast_jit_func_ptrs[i] =
jit_globals->compile_fast_jit_and_then_call;
}
}
}
#endif
#if WASM_ENABLE_JIT != 0
else if (running_mode == Mode_LLVM_JIT) {
void **llvm_jit_func_ptrs;
uint32 i;
/* Notify backend threads to start llvm jit compilation */
module->enable_llvm_jit_compilation = true;
/* Wait until llvm jit finishes initialization */
os_mutex_lock(&module->tierup_wait_lock);
while (!module->llvm_jit_inited) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10);
if (module->orcjit_stop_compiling) {
/* init_llvm_jit_functions_stage2 failed */
os_mutex_unlock(&module->tierup_wait_lock);
os_mutex_unlock(&module->instance_list_lock);
return false;
}
}
os_mutex_unlock(&module->tierup_wait_lock);
llvm_jit_func_ptrs =
module_inst->func_ptrs + module->import_function_count;
for (i = 0; i < module->function_count; i++) {
llvm_jit_func_ptrs[i] = module->functions[i]->llvm_jit_func_ptr;
}
}
#endif
else if (running_mode == Mode_Multi_Tier_JIT) {
/* Notify backend threads to start llvm jit compilation */
module->enable_llvm_jit_compilation = true;
/* Free fast_jit_func_ptrs if it is allocated before */
if (module_inst->fast_jit_func_ptrs
&& module_inst->fast_jit_func_ptrs != module->fast_jit_func_ptrs) {
wasm_runtime_free(module_inst->fast_jit_func_ptrs);
}
module_inst->fast_jit_func_ptrs = module->fast_jit_func_ptrs;
/* Copy all llvm jit func ptrs from the module */
bh_memcpy_s(module_inst->func_ptrs + module->import_function_count,
sizeof(void *) * module->function_count, module->func_ptrs,
sizeof(void *) * module->function_count);
}
else {
bh_assert(0);
}
/* Add module instance into module's instance list if not added */
if (first_time_set) {
bool found = false;
WASMModuleInstance *node = module->instance_list;
while (node) {
if (node == module_inst) {
found = true;
break;
}
node = node->e->next;
}
if (!found) {
module_inst->e->next = module->instance_list;
module->instance_list = module_inst;
}
}
os_mutex_unlock(&module->instance_list_lock);
#endif /* end of !(WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0) */
(void)module;
return true;
}
bool
wasm_set_running_mode(WASMModuleInstance *module_inst, RunningMode running_mode)
{
return set_running_mode(module_inst, running_mode, false);
}
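
An embedder-side sketch (illustrative only, assuming the public wrappers wasm_runtime_instantiate / wasm_runtime_is_running_mode_supported / wasm_runtime_set_running_mode keep the names and signatures referenced above):

/* assumes #include "wasm_export.h" and a module loaded via wasm_runtime_load() */
static wasm_module_inst_t
instantiate_with_llvm_jit(wasm_module_t module)
{
    char error_buf[128];
    wasm_module_inst_t inst = wasm_runtime_instantiate(
        module, 8192, 8192, error_buf, sizeof(error_buf));
    /* per-instance override of the default running mode */
    if (inst && wasm_runtime_is_running_mode_supported(Mode_LLVM_JIT))
        (void)wasm_runtime_set_running_mode(inst, Mode_LLVM_JIT);
    return inst;
}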
/**
* Instantiate module
*/
@ -1422,15 +1587,6 @@ wasm_instantiate(WASMModule *module, bool is_sub_inst, uint32 stack_size,
module_inst->e =
(WASMModuleInstanceExtra *)((uint8 *)module_inst + extra_info_offset);
#if WASM_ENABLE_SHARED_MEMORY != 0
if (os_mutex_init(&module_inst->e->mem_lock) != 0) {
set_error_buf(error_buf, error_buf_size,
"create shared memory lock failed");
goto fail;
}
module_inst->e->mem_lock_inited = true;
#endif
#if WASM_ENABLE_MULTI_MODULE != 0
module_inst->e->sub_module_inst_list =
&module_inst->e->sub_module_inst_list_head;
@ -1803,33 +1959,39 @@ wasm_instantiate(WASMModule *module, bool is_sub_inst, uint32 stack_size,
}
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
#if WASM_ENABLE_WASI_NN != 0
if (!is_sub_inst) {
if (!(module_inst->e->wasi_nn_ctx = wasi_nn_initialize())) {
set_error_buf(error_buf, error_buf_size,
"wasi nn initialization failed");
goto fail;
}
}
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
if (!is_sub_inst) {
/* Add module instance into module's instance list */
os_mutex_lock(&module->instance_list_lock);
#if WASM_ENABLE_DEBUG_INTERP != 0
if (module->instance_list) {
LOG_WARNING(
"warning: multiple instances referencing to the same module "
"may cause unexpected behaviour during debugging");
}
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
/* Copy llvm func ptrs again in case that they were updated
after the module instance was created */
bh_memcpy_s(module_inst->func_ptrs + module->import_function_count,
sizeof(void *) * module->function_count, module->func_ptrs,
sizeof(void *) * module->function_count);
#endif
module_inst->e->next = module->instance_list;
module->instance_list = module_inst;
os_mutex_unlock(&module->instance_list_lock);
}
#endif
/* Set running mode before executing wasm functions */
if (!set_running_mode(module_inst, wasm_runtime_get_default_running_mode(),
true)) {
set_error_buf(error_buf, error_buf_size,
"set instance running mode failed");
goto fail;
}
if (module->start_function != (uint32)-1) {
/* TODO: fix start function can be import function issue */
if (module->start_function >= module->import_function_count)
@ -1895,11 +2057,48 @@ wasm_deinstantiate(WASMModuleInstance *module_inst, bool is_sub_inst)
if (!module_inst)
return;
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
/* Remove instance from module's instance list before freeing
func_ptrs and fast_jit_func_ptrs of the instance, to avoid
accessing the freed memory in the jit backend compilation
threads */
if (!is_sub_inst) {
WASMModule *module = module_inst->module;
WASMModuleInstance *instance_prev = NULL, *instance;
os_mutex_lock(&module->instance_list_lock);
instance = module->instance_list;
while (instance) {
if (instance == module_inst) {
if (!instance_prev)
module->instance_list = instance->e->next;
else
instance_prev->e->next = instance->e->next;
break;
}
instance_prev = instance;
instance = instance->e->next;
}
os_mutex_unlock(&module->instance_list_lock);
}
#endif
#if WASM_ENABLE_JIT != 0
if (module_inst->func_ptrs)
wasm_runtime_free(module_inst->func_ptrs);
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0
if (module_inst->fast_jit_func_ptrs
&& module_inst->fast_jit_func_ptrs
!= module_inst->module->fast_jit_func_ptrs)
wasm_runtime_free(module_inst->fast_jit_func_ptrs);
#endif
#if WASM_ENABLE_FAST_JIT != 0 || WASM_ENABLE_JIT != 0
if (module_inst->func_type_indexes)
wasm_runtime_free(module_inst->func_type_indexes);
@ -1951,39 +2150,17 @@ wasm_deinstantiate(WASMModuleInstance *module_inst, bool is_sub_inst)
}
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
if (!is_sub_inst) {
WASMModule *module = module_inst->module;
WASMModuleInstance *instance_prev = NULL, *instance;
os_mutex_lock(&module->instance_list_lock);
instance = module->instance_list;
while (instance) {
if (instance == module_inst) {
if (!instance_prev)
module->instance_list = instance->e->next;
else
instance_prev->e->next = instance->e->next;
break;
}
instance_prev = instance;
instance = instance->e->next;
}
os_mutex_unlock(&module->instance_list_lock);
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
if (module_inst->e->mem_lock_inited)
os_mutex_destroy(&module_inst->e->mem_lock);
#endif
if (module_inst->e->c_api_func_imports)
wasm_runtime_free(module_inst->e->c_api_func_imports);
#if WASM_ENABLE_WASI_NN != 0
if (!is_sub_inst) {
WASINNContext *wasi_nn_ctx = module_inst->e->wasi_nn_ctx;
if (wasi_nn_ctx)
wasi_nn_destroy(wasi_nn_ctx);
}
#endif
wasm_runtime_free(module_inst);
}
@ -2056,6 +2233,7 @@ call_wasm_with_hw_bound_check(WASMModuleInstance *module_inst,
/* Check native stack overflow firstly to ensure we have enough
native stack to run the following codes before actually calling
the aot function in invokeNative function. */
RECORD_STACK_USAGE(exec_env, (uint8 *)&exec_env_tls);
if ((uint8 *)&exec_env_tls < exec_env->native_stack_boundary
+ page_size * (guard_page_count + 1)) {
wasm_set_exception(module_inst, "native stack overflow");

View File

@ -11,6 +11,10 @@
#include "../common/wasm_runtime_common.h"
#include "../common/wasm_exec_env.h"
#if WASM_ENABLE_WASI_NN != 0
#include "../libraries/wasi-nn/src/wasi_nn_private.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
@ -59,9 +63,7 @@ typedef enum WASMExceptionID {
EXCE_AUX_STACK_UNDERFLOW,
EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
EXCE_OPERAND_STACK_OVERFLOW,
#if WASM_ENABLE_FAST_JIT != 0
EXCE_FAILED_TO_COMPILE_FAST_JIT_FUNC,
#endif
EXCE_ALREADY_THROWN,
EXCE_NUM,
} WASMExceptionID;
@ -219,12 +221,7 @@ typedef struct WASMModuleInstanceExtra {
WASMFunctionInstance *retain_function;
CApiFuncImport *c_api_func_imports;
#if WASM_ENABLE_SHARED_MEMORY != 0
/* lock for shared memory atomic operations */
korp_mutex mem_lock;
bool mem_lock_inited;
#endif
RunningMode running_mode;
#if WASM_ENABLE_MULTI_MODULE != 0
bh_list sub_module_inst_list_head;
@ -238,10 +235,14 @@ typedef struct WASMModuleInstanceExtra {
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT \
|| (WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
&& WASM_ENABLE_LAZY_JIT != 0)
WASMModuleInstance *next;
#endif
#if WASM_ENABLE_WASI_NN != 0
WASINNContext *wasi_nn_ctx;
#endif
} WASMModuleInstanceExtra;
struct AOTFuncPerfProfInfo;
@ -298,7 +299,11 @@ struct WASMModuleInstance {
not available in AOTModuleInstance */
DefPointer(void **, import_func_ptrs);
/* Array of function pointers to fast jit functions,
not available in AOTModuleInstance */
not available in AOTModuleInstance:
Only when the multi-tier JIT macros are all enabled and the running
mode of current module instance is set to Mode_Fast_JIT, runtime
will allocate new memory for it, otherwise it always points to the
module->fast_jit_func_ptrs */
DefPointer(void **, fast_jit_func_ptrs);
/* The custom data that can be set/get by wasm_{get|set}_custom_data */
DefPointer(void *, custom_data);
@ -402,6 +407,10 @@ wasm_dump_perf_profiling(const WASMModuleInstance *module_inst);
void
wasm_deinstantiate(WASMModuleInstance *module_inst, bool is_sub_inst);
bool
wasm_set_running_mode(WASMModuleInstance *module_inst,
RunningMode running_mode);
WASMFunctionInstance *
wasm_lookup_function(const WASMModuleInstance *module_inst, const char *name,
const char *signature);

View File

@ -565,6 +565,7 @@ pthread_create_wrapper(wasm_exec_env_t exec_env,
#if WASM_ENABLE_LIBC_WASI != 0
WASIContext *wasi_ctx;
#endif
CApiFuncImport **new_c_api_func_imports = NULL;
bh_assert(module);
bh_assert(module_inst);
@ -597,6 +598,46 @@ pthread_create_wrapper(wasm_exec_env_t exec_env,
wasm_runtime_set_wasi_ctx(new_module_inst, wasi_ctx);
#endif
/* workaround about passing instantiate-linking information */
{
CApiFuncImport *c_api_func_imports;
uint32 import_func_count = 0;
uint32 size_in_bytes = 0;
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
new_c_api_func_imports = &(
((WASMModuleInstance *)new_module_inst)->e->c_api_func_imports);
c_api_func_imports =
((WASMModuleInstance *)module_inst)->e->c_api_func_imports;
import_func_count = ((WASMModule *)module)->import_function_count;
}
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT) {
AOTModuleInstanceExtra *e =
(AOTModuleInstanceExtra *)((AOTModuleInstance *)new_module_inst)
->e;
new_c_api_func_imports = &(e->c_api_func_imports);
e = (AOTModuleInstanceExtra *)((AOTModuleInstance *)module_inst)->e;
c_api_func_imports = e->c_api_func_imports;
import_func_count = ((AOTModule *)module)->import_func_count;
}
#endif
if (import_func_count != 0 && c_api_func_imports) {
size_in_bytes = sizeof(CApiFuncImport *) * import_func_count;
*new_c_api_func_imports = wasm_runtime_malloc(size_in_bytes);
if (!(*new_c_api_func_imports))
goto fail;
bh_memcpy_s(*new_c_api_func_imports, size_in_bytes,
c_api_func_imports, size_in_bytes);
}
}
if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
goto fail;

View File

@ -63,6 +63,12 @@ typedef struct WASIContext {
wasi_ctx_t
wasm_runtime_get_wasi_ctx(wasm_module_inst_t module_inst);
static inline size_t
min(size_t a, size_t b)
{
return a > b ? b : a;
}
static inline struct fd_table *
wasi_ctx_get_curfds(wasm_module_inst_t module_inst, wasi_ctx_t wasi_ctx)
{
@ -951,6 +957,97 @@ wasi_path_remove_directory(wasm_exec_env_t exec_env, wasi_fd_t fd,
return wasmtime_ssp_path_remove_directory(curfds, fd, path, path_len);
}
#if WASM_ENABLE_THREAD_MGR != 0
static __wasi_timestamp_t
get_timeout_for_poll_oneoff(const wasi_subscription_t *in,
uint32 nsubscriptions)
{
__wasi_timestamp_t timeout = (__wasi_timestamp_t)-1;
uint32 i = 0;
for (i = 0; i < nsubscriptions; ++i) {
const __wasi_subscription_t *s = &in[i];
if (s->u.type == __WASI_EVENTTYPE_CLOCK
&& (s->u.u.clock.flags & __WASI_SUBSCRIPTION_CLOCK_ABSTIME) == 0) {
timeout = min(timeout, s->u.u.clock.timeout);
}
}
return timeout;
}
static void
update_clock_subscription_data(wasi_subscription_t *in, uint32 nsubscriptions,
const wasi_timestamp_t new_timeout)
{
uint32 i = 0;
for (i = 0; i < nsubscriptions; ++i) {
__wasi_subscription_t *s = &in[i];
if (s->u.type == __WASI_EVENTTYPE_CLOCK) {
s->u.u.clock.timeout = new_timeout;
}
}
}
static wasi_errno_t
execute_interruptible_poll_oneoff(wasm_module_inst_t module_inst,
#if !defined(WASMTIME_SSP_STATIC_CURFDS)
struct fd_table *curfds,
#endif
const __wasi_subscription_t *in,
__wasi_event_t *out, size_t nsubscriptions,
size_t *nevents)
{
if (nsubscriptions == 0) {
*nevents = 0;
return __WASI_ESUCCESS;
}
wasi_errno_t err;
__wasi_timestamp_t elapsed = 0;
const __wasi_timestamp_t timeout = get_timeout_for_poll_oneoff(
in, nsubscriptions),
time_quant = 1e9;
const uint64 size_to_copy =
nsubscriptions * (uint64)sizeof(wasi_subscription_t);
__wasi_subscription_t *in_copy = NULL;
if (size_to_copy >= UINT32_MAX
|| !(in_copy = (__wasi_subscription_t *)wasm_runtime_malloc(
(uint32)size_to_copy))) {
return __WASI_ENOMEM;
}
bh_memcpy_s(in_copy, size_to_copy, in, size_to_copy);
while (timeout == (__wasi_timestamp_t)-1 || elapsed <= timeout) {
elapsed += time_quant;
/* update timeout for clock subscription events */
update_clock_subscription_data(in_copy, nsubscriptions,
min(time_quant, timeout - elapsed));
err = wasmtime_ssp_poll_oneoff(curfds, in_copy, out, nsubscriptions,
nevents);
if (err) {
wasm_runtime_free(in_copy);
return err;
}
if (wasm_runtime_get_exception(module_inst) || *nevents > 0) {
wasm_runtime_free(in_copy);
if (*nevents) {
return __WASI_ESUCCESS;
}
return EINTR;
}
}
wasm_runtime_free(in_copy);
return __WASI_ESUCCESS;
}
#endif
static wasi_errno_t
wasi_poll_oneoff(wasm_exec_env_t exec_env, const wasi_subscription_t *in,
wasi_event_t *out, uint32 nsubscriptions, uint32 *nevents_app)
@ -958,7 +1055,7 @@ wasi_poll_oneoff(wasm_exec_env_t exec_env, const wasi_subscription_t *in,
wasm_module_inst_t module_inst = get_module_inst(exec_env);
wasi_ctx_t wasi_ctx = get_wasi_ctx(module_inst);
struct fd_table *curfds = wasi_ctx_get_curfds(module_inst, wasi_ctx);
size_t nevents;
size_t nevents = 0;
wasi_errno_t err;
if (!wasi_ctx)
@ -969,7 +1066,12 @@ wasi_poll_oneoff(wasm_exec_env_t exec_env, const wasi_subscription_t *in,
|| !validate_native_addr(nevents_app, sizeof(uint32)))
return (wasi_errno_t)-1;
#if WASM_ENABLE_THREAD_MGR == 0
err = wasmtime_ssp_poll_oneoff(curfds, in, out, nsubscriptions, &nevents);
#else
err = execute_interruptible_poll_oneoff(module_inst, curfds, in, out,
nsubscriptions, &nevents);
#endif
if (err)
return err;
@ -1861,12 +1963,6 @@ allocate_iovec_app_buffer(wasm_module_inst_t module_inst,
return __WASI_ESUCCESS;
}
static inline size_t
min(size_t a, size_t b)
{
return a > b ? b : a;
}
static wasi_errno_t
copy_buffer_to_iovec_app(wasm_module_inst_t module_inst, uint8 *buf_begin,
uint32 buf_size, iovec_app_t *data, uint32 data_len,

View File

@ -0,0 +1,14 @@
/*
* Copyright (C) 2023 Amazon.com, Inc. or its affiliates. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#if !defined(__GNUC_PREREQ) && (defined(__GNUC__) || defined(__GNUG__)) \
&& !defined(__clang__) && defined(__GNUC_MINOR__)
/* Depending on the platform, the macro is defined in either sys/features.h or
features.h. Since the macro is simple, we re-implement it here instead of
dealing with two different include paths.
*/
#define __GNUC_PREREQ(maj, min) \
((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
#endif
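
Illustrative use of the shim (this mirrors how ssp_config.h and refcount.h below consume it):

#include "gnuc.h"

#if defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 9)
#define CONFIG_HAS_STD_ATOMIC 1 /* GCC >= 4.9 ships C11 atomics */
#else
#define CONFIG_HAS_STD_ATOMIC 0
#endif
#endif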

View File

@ -1,42 +0,0 @@
// Part of the Wasmtime Project, under the Apache License v2.0 with LLVM
// Exceptions. See
// https://github.com/bytecodealliance/wasmtime/blob/main/LICENSE for license
// information.
//
// Significant parts of this file are derived from cloudabi-utils. See
// https://github.com/bytecodealliance/wasmtime/blob/main/lib/wasi/sandboxed-system-primitives/src/LICENSE
// for license information.
//
// The upstream file contains the following copyright notice:
//
// Copyright (c) 2015 Nuxi, https://nuxi.nl/
#ifndef COMMON_LIMITS_H
#define COMMON_LIMITS_H
#define NUMERIC_MIN(t) \
_Generic((t)0, char \
: CHAR_MIN, signed char \
: SCHAR_MIN, unsigned char : 0, short \
: SHRT_MIN, unsigned short : 0, int \
: INT_MIN, unsigned int : 0, long \
: LONG_MIN, unsigned long : 0, long long \
: LLONG_MIN, unsigned long long : 0, default \
: (void)0)
#define NUMERIC_MAX(t) \
_Generic((t)0, char \
: CHAR_MAX, signed char \
: SCHAR_MAX, unsigned char \
: UCHAR_MAX, short \
: SHRT_MAX, unsigned short \
: USHRT_MAX, int \
: INT_MAX, unsigned int \
: UINT_MAX, long \
: LONG_MAX, unsigned long \
: ULONG_MAX, long long \
: LLONG_MAX, unsigned long long \
: ULLONG_MAX, default \
: (void)0)
#endif

View File

@ -15,7 +15,6 @@
#include "bh_platform.h"
#include "wasmtime_ssp.h"
#include "locking.h"
#include "numeric_limits.h"
#include "posix.h"
#include "random.h"
#include "refcount.h"
@ -2257,8 +2256,7 @@ convert_timestamp(__wasi_timestamp_t in, struct timespec *out)
in /= 1000000000;
// Clamp to the maximum in case it would overflow our system's time_t.
out->tv_sec =
(time_t)in < NUMERIC_MAX(time_t) ? (time_t)in : NUMERIC_MAX(time_t);
out->tv_sec = (time_t)in < BH_TIME_T_MAX ? (time_t)in : BH_TIME_T_MAX;
}
// Converts the provided timestamps and flags to a set of arguments for
@ -3226,6 +3224,7 @@ wasi_ssp_sock_get_reuse_port(
#else
errno = ENOTSUP;
ret = BHT_ERROR;
optval = 0;
#endif /* defined(SO_REUSEPORT) */
fd_object_release(fo);

View File

@ -16,6 +16,7 @@
#include "bh_platform.h"
#include "locking.h"
#include "gnuc.h"
#define PRODUCES(...) LOCKS_SHARED(__VA_ARGS__) NO_LOCK_ANALYSIS
#define CONSUMES(...) UNLOCKS(__VA_ARGS__) NO_LOCK_ANALYSIS
@ -95,6 +96,42 @@ refcount_release(struct refcount *r)
return old == 1;
}
#elif defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 7)
struct refcount {
unsigned int count;
};
/* Initialize the reference counter. */
static inline void
refcount_init(struct refcount *r, unsigned int count)
{
__atomic_store_n(&r->count, count, __ATOMIC_SEQ_CST);
}
/* Increment the reference counter. */
static inline void
refcount_acquire(struct refcount *r)
{
__atomic_fetch_add(&r->count, 1, __ATOMIC_ACQUIRE);
}
/* Decrement the reference counter, returning whether the reference
dropped to zero. */
static inline bool
refcount_release(struct refcount *r)
{
int old = (int)__atomic_fetch_sub(&r->count, 1, __ATOMIC_RELEASE);
bh_assert(old != 0 && "Reference count becoming negative");
return old == 1;
}
#else /* else of __GNUC_PREREQ (4.7) */
#error "Reference counter isn't implemented"
#endif /* end of __GNUC_PREREQ (4.7) */
#else /* else of CONFIG_HAS_STD_ATOMIC */
#error "Reference counter isn't implemented"
#endif /* end of CONFIG_HAS_STD_ATOMIC */
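
A minimal usage sketch of the helpers above (illustrative; the pattern is the usual acquire/release discipline):

static void
refcount_usage_sketch(void)
{
    struct refcount rc;

    refcount_init(&rc, 1);       /* creator holds the first reference   */
    refcount_acquire(&rc);       /* another user takes a reference      */
    (void)refcount_release(&rc); /* that user drops it: count back to 1 */
    if (refcount_release(&rc)) {
        /* count reached zero: last reference gone, free the guarded object */
    }
}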

View File

@ -14,6 +14,7 @@
#ifndef SSP_CONFIG_H
#define SSP_CONFIG_H
#include "gnuc.h"
#include <stdlib.h>
#if defined(__FreeBSD__) || defined(__APPLE__) \
@ -107,10 +108,31 @@
#endif
#if !defined(BH_PLATFORM_LINUX_SGX)
/* Clang's __GNUC_PREREQ macro has a different meaning than GCC one,
so we have to handle this case specially */
#if defined(__clang__)
/* Clang provides stdatomic.h since 3.6.0
See https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html */
#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6)
#define CONFIG_HAS_STD_ATOMIC 1
#else
#define CONFIG_HAS_STD_ATOMIC 0
#endif
#elif defined(__GNUC_PREREQ)
/* Even though older versions of GCC support C11, atomics were
not implemented until 4.9. See
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016 */
#if __GNUC_PREREQ(4, 9)
#define CONFIG_HAS_STD_ATOMIC 1
#else /* else of __GNUC_PREREQ(4, 9) */
#define CONFIG_HAS_STD_ATOMIC 0
#endif /* end of __GNUC_PREREQ(4, 9) */
#else /* else of defined(__GNUC_PREREQ) */
#define CONFIG_HAS_STD_ATOMIC 1
#endif /* end of defined(__GNUC_PREREQ) */
#else /* else of !defined(BH_PLATFORM_LINUX_SGX) */
#define CONFIG_HAS_STD_ATOMIC 0
#endif /* end of !defined(BH_PLATFORM_LINUX_SGX) */
#if !defined(__NuttX__)
#define CONFIG_HAS_D_INO 1

View File

@ -76,6 +76,58 @@ traverse_list(bh_list *l, list_visitor visitor, void *user_data)
}
}
/* Assumes cluster->lock is locked */
static bool
safe_traverse_exec_env_list(WASMCluster *cluster, list_visitor visitor,
void *user_data)
{
Vector proc_nodes;
void *node;
bool ret = true;
if (!bh_vector_init(&proc_nodes, cluster->exec_env_list.len, sizeof(void *),
false)) {
ret = false;
goto final;
}
node = bh_list_first_elem(&cluster->exec_env_list);
while (node) {
bool already_processed = false;
void *proc_node;
for (size_t i = 0; i < bh_vector_size(&proc_nodes); i++) {
if (!bh_vector_get(&proc_nodes, i, &proc_node)) {
ret = false;
goto final;
}
if (proc_node == node) {
already_processed = true;
break;
}
}
if (already_processed) {
node = bh_list_elem_next(node);
continue;
}
os_mutex_unlock(&cluster->lock);
visitor(node, user_data);
os_mutex_lock(&cluster->lock);
if (!bh_vector_append(&proc_nodes, &node)) {
ret = false;
goto final;
}
node = bh_list_first_elem(&cluster->exec_env_list);
}
final:
bh_vector_destroy(&proc_nodes);
return ret;
}
/* The caller must lock cluster->lock */
static bool
allocate_aux_stack(WASMExecEnv *exec_env, uint32 *start, uint32 *size)
@ -344,7 +396,6 @@ wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
os_mutex_unlock(&cluster->debug_inst->wait_lock);
}
#endif
if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
ret = false;
@ -478,7 +529,7 @@ fail4:
/* free the allocated aux stack space */
free_aux_stack(exec_env, aux_stack_start);
fail3:
wasm_exec_env_destroy(new_exec_env);
wasm_exec_env_destroy_internal(new_exec_env);
fail2:
wasm_runtime_deinstantiate_internal(new_module_inst, true);
fail1:
@ -616,7 +667,7 @@ fail3:
if (alloc_aux_stack)
free_aux_stack(exec_env, aux_stack_start);
fail2:
wasm_exec_env_destroy(new_exec_env);
wasm_exec_env_destroy_internal(new_exec_env);
fail1:
os_mutex_unlock(&cluster->lock);
@ -786,16 +837,22 @@ wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
korp_tid handle;
os_mutex_lock(&cluster_list_lock);
os_mutex_lock(&exec_env->cluster->lock);
if (!clusters_have_exec_env(exec_env) || exec_env->thread_is_detached) {
/* Invalid thread, thread has exited or thread has been detached */
if (ret_val)
*ret_val = NULL;
os_mutex_unlock(&exec_env->cluster->lock);
os_mutex_unlock(&cluster_list_lock);
return 0;
}
exec_env->wait_count++;
handle = exec_env->handle;
os_mutex_unlock(&exec_env->cluster->lock);
os_mutex_unlock(&cluster_list_lock);
return os_thread_join(handle, ret_val);
}
@ -878,15 +935,22 @@ int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
os_mutex_lock(&cluster_list_lock);
os_mutex_lock(&exec_env->cluster->lock);
if (!exec_env->cluster) {
goto final;
}
if (!clusters_have_exec_env(exec_env)) {
/* Invalid thread or the thread has exited */
os_mutex_unlock(&cluster_list_lock);
return 0;
goto final;
}
os_mutex_unlock(&cluster_list_lock);
set_thread_cancel_flags(exec_env);
final:
os_mutex_unlock(&exec_env->cluster->lock);
os_mutex_unlock(&cluster_list_lock);
return 0;
}
@ -908,11 +972,9 @@ wasm_cluster_terminate_all(WASMCluster *cluster)
{
os_mutex_lock(&cluster->lock);
cluster->processing = true;
os_mutex_unlock(&cluster->lock);
traverse_list(&cluster->exec_env_list, terminate_thread_visitor, NULL);
safe_traverse_exec_env_list(cluster, terminate_thread_visitor, NULL);
os_mutex_lock(&cluster->lock);
cluster->processing = false;
os_mutex_unlock(&cluster->lock);
}
@ -923,12 +985,10 @@ wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
{
os_mutex_lock(&cluster->lock);
cluster->processing = true;
os_mutex_unlock(&cluster->lock);
traverse_list(&cluster->exec_env_list, terminate_thread_visitor,
safe_traverse_exec_env_list(cluster, terminate_thread_visitor,
(void *)exec_env);
os_mutex_lock(&cluster->lock);
cluster->processing = false;
os_mutex_unlock(&cluster->lock);
}
@ -950,11 +1010,9 @@ wams_cluster_wait_for_all(WASMCluster *cluster)
{
os_mutex_lock(&cluster->lock);
cluster->processing = true;
os_mutex_unlock(&cluster->lock);
traverse_list(&cluster->exec_env_list, wait_for_thread_visitor, NULL);
safe_traverse_exec_env_list(cluster, wait_for_thread_visitor, NULL);
os_mutex_lock(&cluster->lock);
cluster->processing = false;
os_mutex_unlock(&cluster->lock);
}
@ -965,12 +1023,10 @@ wasm_cluster_wait_for_all_except_self(WASMCluster *cluster,
{
os_mutex_lock(&cluster->lock);
cluster->processing = true;
os_mutex_unlock(&cluster->lock);
traverse_list(&cluster->exec_env_list, wait_for_thread_visitor,
safe_traverse_exec_env_list(cluster, wait_for_thread_visitor,
(void *)exec_env);
os_mutex_lock(&cluster->lock);
cluster->processing = false;
os_mutex_unlock(&cluster->lock);
}

View File

@ -1 +0,0 @@
**/Dockerfile

View File

@ -17,27 +17,76 @@ By only including this file in your WASM application you will bind WASI-NN into
To run the tests we assume that the current directory is the root of the repository.
1. Build the docker image,
### Build the runtime
Build the runtime base image,
```
docker build -t wasi-nn -f core/iwasm/libraries/wasi-nn/test/Dockerfile .
docker build -t wasi-nn-base -f core/iwasm/libraries/wasi-nn/test/Dockerfile.base .
```
2. Run the container
Build the runtime image for your execution target type.
`EXECUTION_TYPE` can be:
* `cpu`
* `nvidia-gpu`
```
docker run wasi-nn
EXECUTION_TYPE=cpu
docker build -t wasi-nn-${EXECUTION_TYPE} -f core/iwasm/libraries/wasi-nn/test/Dockerfile.${EXECUTION_TYPE} .
```
### Build wasm app
```
docker build -t wasi-nn-compile -f core/iwasm/libraries/wasi-nn/test/Dockerfile.compile .
```
```
docker run -v $PWD/core/iwasm/libraries/wasi-nn:/wasi-nn wasi-nn-compile
```
### Run wasm app
If all the tests ran properly, you will see the following message in the terminal,
```
Tests: passed!
```
* CPU
```
docker run \
-v $PWD/core/iwasm/libraries/wasi-nn/test:/assets wasi-nn-cpu \
--dir=/assets \
--env="TARGET=cpu" \
/assets/test_tensorflow.wasm
```
* (NVIDIA) GPU
```
docker run \
--runtime=nvidia \
-v $PWD/core/iwasm/libraries/wasi-nn/test:/assets wasi-nn-nvidia-gpu \
--dir=/assets \
--env="TARGET=gpu" \
/assets/test_tensorflow.wasm
```
Requirements:
* [NVIDIA docker](https://github.com/NVIDIA/nvidia-docker).
## What is missing
* Only 1 model at a time is supported.
Supported:
* Only 1 WASM app at a time.
* Only 1 model at a time.
* `graph` and `graph-execution-context` are ignored.
* Only `tensorflow` (lite) is supported.
* Only `cpu` is supported.
* Graph encoding: `tensorflowlite`.
* Execution target: `cpu` and `gpu`.
* Tensor type: `fp32`.
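
For orientation, a guest-side call sequence looks roughly like the sketch below; the type, enum and function names (`load`, `set_input`, `fp32`, `tensorflowlite`, ...) are taken from the host implementation in this tree, but the exact guest-side signatures should be checked against `wasi_nn.h` before relying on them.

```
#include <stdint.h>
#include "wasi_nn.h"

/* model_buf/model_size: a .tflite flatbuffer already read by the app;
   input/output: fp32 buffers sized for the model; output_size is the
   output buffer size in bytes on entry (all names illustrative). */
static error
run_inference(uint8_t *model_buf, uint32_t model_size, float *input,
              uint32_t *in_dims, uint32_t in_dim_count, float *output,
              uint32_t *output_size)
{
    graph g;
    graph_execution_context ctx;
    error err;

    graph_builder builder = { .buf = model_buf, .size = model_size };
    graph_builder_array builders = { .buf = &builder, .size = 1 };
    if ((err = load(&builders, tensorflowlite, cpu, &g)) != success)
        return err;

    if ((err = init_execution_context(g, &ctx)) != success)
        return err;

    tensor_dimensions dims = { .buf = in_dims, .size = in_dim_count };
    tensor t = { .dimensions = &dims, .type = fp32, .data = (tensor_data)input };
    if ((err = set_input(ctx, 0, &t)) != success)
        return err;

    if ((err = compute(ctx)) != success)
        return err;

    return get_output(ctx, 0, (tensor_data)output, output_size);
}
```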

View File

@ -1,55 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_LOGGER_H
#define WASI_NN_LOGGER_H
#include <stdio.h>
#include <string.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/* Disable a level by removing the define */
#define ENABLE_ERR_LOG
#define ENABLE_WARN_LOG
#define ENABLE_DBG_LOG
#define ENABLE_INFO_LOG
// Definition of the levels
#ifdef ENABLE_ERR_LOG
#define NN_ERR_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_ERR_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_WARN_LOG
#define NN_WARN_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_WARN_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_DBG_LOG
#define NN_DBG_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_DBG_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_INFO_LOG
#define NN_INFO_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_INFO_PRINTF(fmt, ...)
#endif
#endif

View File

@ -0,0 +1,63 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_LOGGER_H
#define WASI_NN_LOGGER_H
#include <stdio.h>
#include <string.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/* Disable a level by removing the define */
#define ENABLE_ERR_LOG
#define ENABLE_WARN_LOG
#define ENABLE_DBG_LOG
#define ENABLE_INFO_LOG
// Definition of the levels
#ifdef ENABLE_ERR_LOG
#define NN_ERR_PRINTF(fmt, ...) \
do { \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout); \
} while (0)
#else
#define NN_ERR_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_WARN_LOG
#define NN_WARN_PRINTF(fmt, ...) \
do { \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout); \
} while (0)
#else
#define NN_WARN_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_DBG_LOG
#define NN_DBG_PRINTF(fmt, ...) \
do { \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout); \
} while (0)
#else
#define NN_DBG_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_INFO_LOG
#define NN_INFO_PRINTF(fmt, ...) \
do { \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout); \
} while (0)
#else
#define NN_INFO_PRINTF(fmt, ...)
#endif
#endif
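
The do { ... } while (0) wrappers added above make each macro expand to a single statement, which matters once the macro body contains several statements; a small sketch of the pitfall they avoid (names from this library, usage illustrative):

#include "logger.h"
#include "wasi_nn.h" /* for the error/success names used below */

static void
report(error err)
{
    /* With the previous unbraced multi-statement macros, only the first
       printf of NN_ERR_PRINTF would sit under the `if`, and the `else`
       would no longer have a matching `if`; the do/while(0) form keeps
       the whole body as one guarded statement. */
    if (err != success)
        NN_ERR_PRINTF("wasi-nn call failed: %d", err);
    else
        NN_DBG_PRINTF("wasi-nn call succeeded");
}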

View File

@ -0,0 +1,163 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_app_native.h"
static error
graph_builder_app_native(wasm_module_inst_t instance,
graph_builder_wasm *builder_wasm,
graph_builder *builder)
{
if (!wasm_runtime_validate_app_addr(instance, builder_wasm->buf_offset,
builder_wasm->size * sizeof(uint8_t))) {
NN_ERR_PRINTF("builder_wasm->buf_offset is invalid");
return invalid_argument;
}
builder->buf = (uint8_t *)wasm_runtime_addr_app_to_native(
instance, builder_wasm->buf_offset);
builder->size = builder_wasm->size;
return success;
}
error
graph_builder_array_app_native(wasm_module_inst_t instance,
graph_builder_array_wasm *builder_array_wasm,
graph_builder_array *builder_array)
{
if (!wasm_runtime_validate_native_addr(instance, builder_array_wasm,
sizeof(graph_builder_array_wasm))) {
NN_ERR_PRINTF("builder_array_wasm is invalid");
return invalid_argument;
}
NN_DBG_PRINTF("Graph builder array contains %d elements",
builder_array_wasm->size);
if (!wasm_runtime_validate_app_addr(
instance, builder_array_wasm->buf_offset,
builder_array_wasm->size * sizeof(graph_builder_wasm))) {
NN_ERR_PRINTF("builder_array_wasm->buf_offset is invalid");
return invalid_argument;
}
graph_builder_wasm *builder_wasm =
(graph_builder_wasm *)wasm_runtime_addr_app_to_native(
instance, builder_array_wasm->buf_offset);
graph_builder *builder = (graph_builder *)wasm_runtime_malloc(
builder_array_wasm->size * sizeof(graph_builder));
if (builder == NULL)
return missing_memory;
for (uint32_t i = 0; i < builder_array_wasm->size; ++i) {
error res;
if (success
!= (res = graph_builder_app_native(instance, &builder_wasm[i],
&builder[i]))) {
wasm_runtime_free(builder);
return res;
}
NN_DBG_PRINTF("Graph builder %d contains %d elements", i,
builder->size);
}
builder_array->buf = builder;
builder_array->size = builder_array_wasm->size;
return success;
}
static error
tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
tensor_wasm *input_tensor_wasm, tensor_data *data)
{
if (!wasm_runtime_validate_app_addr(
instance, input_tensor_wasm->data_offset, total_elements)) {
NN_ERR_PRINTF("input_tensor_wasm->data_offset is invalid");
return invalid_argument;
}
*data = (tensor_data)wasm_runtime_addr_app_to_native(
instance, input_tensor_wasm->data_offset);
return success;
}
static error
tensor_dimensions_app_native(wasm_module_inst_t instance,
tensor_wasm *input_tensor_wasm,
tensor_dimensions **dimensions)
{
if (!wasm_runtime_validate_app_addr(instance,
input_tensor_wasm->dimensions_offset,
sizeof(tensor_dimensions_wasm))) {
NN_ERR_PRINTF("input_tensor_wasm->dimensions_offset is invalid");
return invalid_argument;
}
tensor_dimensions_wasm *dimensions_wasm =
(tensor_dimensions_wasm *)wasm_runtime_addr_app_to_native(
instance, input_tensor_wasm->dimensions_offset);
if (!wasm_runtime_validate_app_addr(instance, dimensions_wasm->buf_offset,
sizeof(tensor_dimensions))) {
NN_ERR_PRINTF("dimensions_wasm->buf_offset is invalid");
return invalid_argument;
}
*dimensions =
(tensor_dimensions *)wasm_runtime_malloc(sizeof(tensor_dimensions));
if (*dimensions == NULL)
return missing_memory;
(*dimensions)->size = dimensions_wasm->size;
(*dimensions)->buf = (uint32_t *)wasm_runtime_addr_app_to_native(
instance, dimensions_wasm->buf_offset);
NN_DBG_PRINTF("Number of dimensions: %d", (*dimensions)->size);
return success;
}
error
tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor_wasm,
tensor *input_tensor)
{
NN_DBG_PRINTF("Converting tensor_wasm to tensor");
if (!wasm_runtime_validate_native_addr(instance, input_tensor_wasm,
sizeof(tensor_wasm))) {
NN_ERR_PRINTF("input_tensor_wasm is invalid");
return invalid_argument;
}
error res;
tensor_dimensions *dimensions = NULL;
if (success
!= (res = tensor_dimensions_app_native(instance, input_tensor_wasm,
&dimensions))) {
NN_ERR_PRINTF("error when parsing dimensions");
return res;
}
uint32_t total_elements = 1;
for (uint32_t i = 0; i < dimensions->size; ++i) {
total_elements *= dimensions->buf[i];
NN_DBG_PRINTF("Dimension %d: %d", i, dimensions->buf[i]);
}
NN_DBG_PRINTF("Tensor type: %d", input_tensor_wasm->type);
NN_DBG_PRINTF("Total number of elements: %d", total_elements);
tensor_data data = NULL;
if (success
!= (res = tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data))) {
wasm_runtime_free(dimensions);
return res;
}
input_tensor->type = input_tensor_wasm->type;
input_tensor->dimensions = dimensions;
input_tensor->data = data;
return success;
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_APP_NATIVE
#define WASI_NN_APP_NATIVE
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include "wasi_nn.h"
#include "logger.h"
#include "bh_platform.h"
#include "wasm_export.h"
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_array_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} tensor_dimensions_wasm;
typedef struct {
uint32_t dimensions_offset;
tensor_type type;
uint32_t data_offset;
} tensor_wasm;
error
graph_builder_array_app_native(wasm_module_inst_t instance,
graph_builder_array_wasm *builder,
graph_builder_array *builder_native);
error
tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor,
tensor *input_tensor_native);
#endif

View File

@ -0,0 +1,302 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include "wasi_nn.h"
#include "wasi_nn_app_native.h"
#include "logger.h"
#include "wasi_nn_tensorflowlite.hpp"
#include "bh_platform.h"
#include "wasm_export.h"
#include "wasm_runtime.h"
#include "aot_runtime.h"
/* Definition of 'wasi_nn.h' structs in WASM app format (using offset) */
typedef error (*LOAD)(graph_builder_array *, graph_encoding, execution_target,
graph *);
typedef error (*INIT_EXECUTION_CONTEXT)(graph, graph_execution_context *);
typedef error (*SET_INPUT)(graph_execution_context, uint32_t, tensor *);
typedef error (*COMPUTE)(graph_execution_context);
typedef error (*GET_OUTPUT)(graph_execution_context, uint32_t, tensor_data,
uint32_t *);
typedef struct {
LOAD load;
INIT_EXECUTION_CONTEXT init_execution_context;
SET_INPUT set_input;
COMPUTE compute;
GET_OUTPUT get_output;
} api_function;
/* Global variables */
static api_function lookup[] = {
{ NULL, NULL, NULL, NULL, NULL },
{ NULL, NULL, NULL, NULL, NULL },
{ NULL, NULL, NULL, NULL, NULL },
{ NULL, NULL, NULL, NULL, NULL },
{ tensorflowlite_load, tensorflowlite_init_execution_context,
tensorflowlite_set_input, tensorflowlite_compute,
tensorflowlite_get_output }
};
/* Utils */
static bool
is_encoding_implemented(graph_encoding encoding)
{
return lookup[encoding].load && lookup[encoding].init_execution_context
&& lookup[encoding].set_input && lookup[encoding].compute
&& lookup[encoding].get_output;
}
static error
is_model_initialized(WASINNContext *wasi_nn_ctx)
{
if (!wasi_nn_ctx->is_initialized) {
NN_ERR_PRINTF("Model not initialized.");
return runtime_error;
}
return success;
}
WASINNContext *
wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx = NULL;
#if WASM_ENABLE_INTERP != 0
if (instance->module_type == Wasm_Module_Bytecode) {
NN_DBG_PRINTF("Getting ctx from WASM");
WASMModuleInstance *module_inst = (WASMModuleInstance *)instance;
wasi_nn_ctx = ((WASMModuleInstanceExtra *)module_inst->e)->wasi_nn_ctx;
}
#endif
#if WASM_ENABLE_AOT != 0
if (instance->module_type == Wasm_Module_AoT) {
NN_DBG_PRINTF("Getting ctx from AOT");
AOTModuleInstance *module_inst = (AOTModuleInstance *)instance;
wasi_nn_ctx = ((AOTModuleInstanceExtra *)module_inst->e)->wasi_nn_ctx;
}
#endif
bh_assert(wasi_nn_ctx != NULL);
NN_DBG_PRINTF("Returning ctx");
return wasi_nn_ctx;
}
/* WASI-NN implementation */
error
wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *g)
{
NN_DBG_PRINTF("Running wasi_nn_load [encoding=%d, target=%d]...", encoding,
target);
if (!is_encoding_implemented(encoding)) {
NN_ERR_PRINTF("Encoding not supported.");
return invalid_encoding;
}
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
error res;
graph_builder_array builder_native = { 0 };
if (success
!= (res = graph_builder_array_app_native(instance, builder,
&builder_native)))
return res;
if (!wasm_runtime_validate_native_addr(instance, g, sizeof(graph))) {
NN_ERR_PRINTF("graph is invalid");
res = invalid_argument;
goto fail;
}
res = lookup[encoding].load(&builder_native, encoding, target, g);
NN_DBG_PRINTF("wasi_nn_load finished with status %d [graph=%d]", res, *g);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->current_encoding = encoding;
wasi_nn_ctx->is_initialized = true;
fail:
// XXX: Free intermediate structure pointers
if (builder_native.buf)
wasm_runtime_free(builder_native.buf);
return res;
}
error
wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
graph_execution_context *ctx)
{
NN_DBG_PRINTF("Running wasi_nn_init_execution_context [graph=%d]...", g);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
return res;
if (!wasm_runtime_validate_native_addr(instance, ctx,
sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid");
return invalid_argument;
}
res = lookup[wasi_nn_ctx->current_encoding].init_execution_context(g, ctx);
*ctx = g;
NN_DBG_PRINTF(
"wasi_nn_init_execution_context finished with status %d [ctx=%d]", res,
*ctx);
return res;
}
error
wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_wasm *input_tensor)
{
NN_DBG_PRINTF("Running wasi_nn_set_input [ctx=%d, index=%d]...", ctx,
index);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
return res;
tensor input_tensor_native = { 0 };
if (success
!= (res = tensor_app_native(instance, input_tensor,
&input_tensor_native)))
return res;
res = lookup[wasi_nn_ctx->current_encoding].set_input(ctx, index,
&input_tensor_native);
// XXX: Free intermediate structure pointers
if (input_tensor_native.dimensions)
wasm_runtime_free(input_tensor_native.dimensions);
NN_DBG_PRINTF("wasi_nn_set_input finished with status %d", res);
return res;
}
error
wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
{
NN_DBG_PRINTF("Running wasi_nn_compute [ctx=%d]...", ctx);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
return res;
res = lookup[wasi_nn_ctx->current_encoding].compute(ctx);
NN_DBG_PRINTF("wasi_nn_compute finished with status %d", res);
return res;
}
error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
{
NN_DBG_PRINTF("Running wasi_nn_get_output [ctx=%d, index=%d]...", ctx,
index);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
return res;
if (!wasm_runtime_validate_native_addr(instance, output_tensor_size,
sizeof(uint32_t))) {
NN_ERR_PRINTF("output_tensor_size is invalid");
return invalid_argument;
}
res = lookup[wasi_nn_ctx->current_encoding].get_output(
ctx, index, output_tensor, output_tensor_size);
NN_DBG_PRINTF("wasi_nn_get_output finished with status %d [data_size=%d]",
res, *output_tensor_size);
return res;
}
/* Non-exposed public functions */
WASINNContext *
wasi_nn_initialize()
{
NN_DBG_PRINTF("Initializing wasi-nn");
WASINNContext *wasi_nn_ctx =
(WASINNContext *)wasm_runtime_malloc(sizeof(WASINNContext));
if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF("Error when allocating memory for WASI-NN context");
return NULL;
}
wasi_nn_ctx->is_initialized = true;
wasi_nn_ctx->current_encoding = 3;
return wasi_nn_ctx;
}
void
wasi_nn_destroy(WASINNContext *wasi_nn_ctx)
{
if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF(
"Error when deallocating memory. WASI-NN context is NULL");
return;
}
NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_initialized: %d", wasi_nn_ctx->is_initialized);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->current_encoding);
tensorflowlite_destroy();
wasm_runtime_free(wasi_nn_ctx);
}
/* Register WASI-NN in WAMR */
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
{ #func_name, wasi_nn_##func_name, signature, NULL }
/* clang-format on */
static NativeSymbol native_symbols_wasi_nn[] = {
REG_NATIVE_FUNC(load, "(*ii*)i"),
REG_NATIVE_FUNC(init_execution_context, "(i*)i"),
REG_NATIVE_FUNC(set_input, "(ii*)i"),
REG_NATIVE_FUNC(compute, "(i)i"),
REG_NATIVE_FUNC(get_output, "(ii**)i"),
};
uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_libc_wasi_apis)
{
*p_libc_wasi_apis = native_symbols_wasi_nn;
return sizeof(native_symbols_wasi_nn) / sizeof(NativeSymbol);
}
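/*
 * The signature strings above follow WAMR's native-API convention: 'i' = i32,
 * 'I' = i64, 'f' = f32, 'F' = f64, '*' = an app-space buffer address that the
 * runtime validates and converts, '~' = the byte length of the preceding '*'
 * argument.
 *
 * Below is a minimal sketch -- function and module names are hypothetical --
 * of how an embedder could register a similar table for its own native module
 * through the public wasm_runtime_register_natives() API (wasi-nn itself is
 * wired up through the internal get_wasi_nn_export_apis() hook instead).
 */
#include "wasm_export.h"

/* Hypothetical native function: takes two i32 values, returns an i32 */
static int32_t
my_add(wasm_exec_env_t exec_env, int32_t a, int32_t b)
{
    (void)exec_env;
    return a + b;
}

static NativeSymbol my_native_symbols[] = {
    { "add", my_add, "(ii)i", NULL },
};

static bool
register_my_natives(void)
{
    /* "env" is the import module name the WASM app links against */
    return wasm_runtime_register_natives("env", my_native_symbols,
                                         sizeof(my_native_symbols)
                                             / sizeof(NativeSymbol));
}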

View File

@ -0,0 +1,30 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_PRIVATE_H
#define WASI_NN_PRIVATE_H
#include "wasi_nn_types.h"
typedef struct {
bool is_initialized;
graph_encoding current_encoding;
} WASINNContext;
/**
* @brief Initialize wasi-nn
*
*/
WASINNContext *
wasi_nn_initialize();
/**
 * @brief Destroy wasi-nn on app exit
*
*/
void
wasi_nn_destroy(WASINNContext *wasi_nn_ctx);
#endif

View File

@ -3,8 +3,10 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_tensorflow.hpp"
#include "wasi_nn_common.h"
#include "wasi_nn.h"
#include "wasi_nn_tensorflowlite.hpp"
#include "logger.h"
#include "bh_common.h"
#include "bh_platform.h"
#include "platform_common.h"
@ -14,6 +16,7 @@
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/error_reporter.h>
#include <tensorflow/lite/delegates/gpu/delegate.h>
/* Global variables */
@ -25,30 +28,30 @@ static char *model_pointer = NULL;
/* WASI-NN (tensorflow) implementation */
error
tensorflow_load(graph_builder_array builder, graph_encoding encoding,
execution_target target, graph *graph)
tensorflowlite_load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g)
{
if (model_pointer != NULL) {
wasm_runtime_free(model_pointer);
model_pointer = NULL;
}
if (builder.size != 1) {
if (builder->size != 1) {
NN_ERR_PRINTF("Unexpected builder format.");
return invalid_argument;
}
if (encoding != tensorflow) {
NN_ERR_PRINTF("Encoding is not tensorflow.");
if (encoding != tensorflowlite) {
NN_ERR_PRINTF("Encoding is not tensorflowlite.");
return invalid_argument;
}
if (target != cpu) {
NN_ERR_PRINTF("Only CPU target is supported.");
if (target != cpu && target != gpu) {
NN_ERR_PRINTF("Only CPU and GPU target is supported.");
return invalid_argument;
}
uint32_t size = builder.buf[0].size;
uint32_t size = builder->buf[0].size;
model_pointer = (char *)wasm_runtime_malloc(size);
if (model_pointer == NULL) {
@ -56,7 +59,7 @@ tensorflow_load(graph_builder_array builder, graph_encoding encoding,
return missing_memory;
}
bh_memcpy_s(model_pointer, size, builder.buf[0].buf, size);
bh_memcpy_s(model_pointer, size, builder->buf[0].buf, size);
model = tflite::FlatBufferModel::BuildFromBuffer(model_pointer, size, NULL);
if (model == NULL) {
@ -77,11 +80,34 @@ tensorflow_load(graph_builder_array builder, graph_encoding encoding,
return missing_memory;
}
bool use_default = false;
switch (target) {
case gpu:
{
// https://www.tensorflow.org/lite/performance/gpu
auto options = TfLiteGpuDelegateOptionsV2Default();
options.inference_preference =
TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
options.inference_priority1 =
TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY;
auto *delegate = TfLiteGpuDelegateV2Create(&options);
if (interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
NN_ERR_PRINTF("Error when enabling GPU delegate.");
use_default = true;
}
break;
}
default:
use_default = true;
}
if (use_default)
NN_WARN_PRINTF("Default encoding is CPU.");
return success;
}
error
tensorflow_init_execution_context(graph graph)
tensorflowlite_init_execution_context(graph g, graph_execution_context *ctx)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
@ -92,7 +118,7 @@ tensorflow_init_execution_context(graph graph)
}
error
tensorflow_set_input(graph_execution_context ctx, uint32_t index,
tensorflowlite_set_input(graph_execution_context ctx, uint32_t index,
tensor *input_tensor)
{
if (interpreter == NULL) {
@ -113,11 +139,11 @@ tensorflow_set_input(graph_execution_context ctx, uint32_t index,
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
for (int i = 0; i < tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
uint32_t input_tensor_size = 1;
for (int i = 0; i < input_tensor->dimensions->size; i++)
for (uint32_t i = 0; i < input_tensor->dimensions->size; i++)
input_tensor_size *= (uint32_t)input_tensor->dimensions->buf[i];
if (model_tensor_size != input_tensor_size) {
@ -136,7 +162,7 @@ tensorflow_set_input(graph_execution_context ctx, uint32_t index,
}
error
tensorflow_compute(graph_execution_context ctx)
tensorflowlite_compute(graph_execution_context ctx)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
@ -147,8 +173,9 @@ tensorflow_compute(graph_execution_context ctx)
}
error
tensorflow_get_output(graph_execution_context context, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
tensorflowlite_get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor,
uint32_t *output_tensor_size)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
@ -178,7 +205,7 @@ tensorflow_get_output(graph_execution_context context, uint32_t index,
}
float *tensor_f = interpreter->typed_output_tensor<float>(index);
for (int i = 0; i < model_tensor_size; ++i)
for (uint32_t i = 0; i < model_tensor_size; ++i)
NN_DBG_PRINTF("output: %f", tensor_f[i]);
*output_tensor_size = model_tensor_size;
@ -186,3 +213,22 @@ tensorflow_get_output(graph_execution_context context, uint32_t index,
model_tensor_size * sizeof(float));
return success;
}
void
tensorflowlite_destroy()
{
/*
        TensorFlow Lite memory is managed internally by TensorFlow Lite.
Related issues:
* https://github.com/tensorflow/tensorflow/issues/15880
*/
NN_DBG_PRINTF("Freeing memory.");
model.reset(nullptr);
model = NULL;
interpreter.reset(nullptr);
interpreter = NULL;
wasm_runtime_free(model_pointer);
model_pointer = NULL;
NN_DBG_PRINTF("Memory free'd.");
}

View File

@ -0,0 +1,41 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TENSORFLOWLITE_HPP
#define WASI_NN_TENSORFLOWLITE_HPP
#include "wasi_nn.h"
#ifdef __cplusplus
extern "C" {
#endif
error
tensorflowlite_load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g);
error
tensorflowlite_init_execution_context(graph g, graph_execution_context *ctx);
error
tensorflowlite_set_input(graph_execution_context ctx, uint32_t index,
tensor *input_tensor);
error
tensorflowlite_compute(graph_execution_context ctx);
error
tensorflowlite_get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor,
uint32_t *output_tensor_size);
void
tensorflowlite_destroy();
#ifdef __cplusplus
}
#endif
#endif

View File

@ -7,10 +7,10 @@ project (iwasm)
set (CMAKE_VERBOSE_MAKEFILE OFF)
# Reset default linker flags
set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set (CMAKE_C_STANDARD 99)
set (CMAKE_CXX_STANDARD 14)
set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
if (NOT DEFINED WAMR_BUILD_PLATFORM)
set (WAMR_BUILD_PLATFORM "linux")

View File

@ -1,32 +0,0 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
cmake build-essential git wget python3.10 python3-pip
RUN wget -q https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-14/wasi-sdk-14.0-linux.tar.gz && \
tar xf wasi-sdk-*-linux.tar.gz -C /opt && rm -f wasi-sdk-*-linux.tar.gz && \
mv /opt/wasi-sdk-14.0 /opt/wasi-sdk
WORKDIR /home/wamr
COPY core core
COPY build-scripts build-scripts
COPY product-mini product-mini
RUN pip3 install -r core/iwasm/libraries/wasi-nn/test/requirements.txt
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test/build
RUN cmake -DWAMR_BUILD_WASI_NN=1 ..
RUN make -j $(grep -c ^processor /proc/cpuinfo)
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test
RUN ./build.sh
ENTRYPOINT [ "./build/iwasm", "--dir=.", "test_tensorflow.wasm" ]

View File

@ -0,0 +1,22 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM ubuntu:20.04 AS base
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
cmake build-essential git
WORKDIR /home/wamr
COPY . .
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test/build
RUN cmake \
-DWAMR_BUILD_WASI_NN=1 \
-DTFLITE_ENABLE_GPU=ON \
..
RUN make -j $(grep -c ^processor /proc/cpuinfo)

View File

@ -0,0 +1,23 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM ubuntu:20.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
cmake build-essential git wget python3.10 python3-pip
ARG WASI_SDK_VER=19
RUN wget -c --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-${WASI_SDK_VER}/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -P /opt \
&& tar xf /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz -C /opt \
&& ln -fs /opt/wasi-sdk-${WASI_SDK_VER}.0 /opt/wasi-sdk \
&& rm /opt/wasi-sdk-${WASI_SDK_VER}.0-linux.tar.gz
WORKDIR /wasi-nn/test
COPY core/iwasm/libraries/wasi-nn/test/requirements.txt .
RUN pip3 install -r requirements.txt && rm requirements.txt
ENTRYPOINT [ "bash", "./build.sh" ]

View File

@ -0,0 +1,8 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM ubuntu:20.04
COPY --from=wasi-nn-base /home/wamr/core/iwasm/libraries/wasi-nn/test/build/iwasm /run/iwasm
ENTRYPOINT [ "/run/iwasm" ]

View File

@ -0,0 +1,20 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM nvidia/cuda:11.3.0-runtime-ubuntu20.04
RUN apt-get update && apt-get install -y --no-install-recommends \
ocl-icd-libopencl1 \
ocl-icd-opencl-dev \
clinfo && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /etc/OpenCL/vendors && \
echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
COPY --from=wasi-nn-base /home/wamr/core/iwasm/libraries/wasi-nn/test/build/iwasm /run/iwasm
ENTRYPOINT [ "/run/iwasm" ]

View File

@ -7,7 +7,7 @@
-Wl,--allow-undefined \
-Wl,--strip-all,--no-entry \
--sysroot=/opt/wasi-sdk/share/wasi-sysroot \
-I/home/wamr/core/iwasm/libraries/wasi-nn \
-I.. \
-o test_tensorflow.wasm test_tensorflow.c
# TFLite models to use in the tests

78
core/iwasm/libraries/wasi-nn/test/test_tensorflow.c Executable file → Normal file
View File

@ -28,7 +28,7 @@ typedef struct {
// WASI-NN wrappers
error
wasm_load(char *model_name, graph *graph)
wasm_load(char *model_name, graph *g, execution_target target)
{
FILE *pFile = fopen(model_name, "r");
if (pFile == NULL)
@ -64,7 +64,7 @@ wasm_load(char *model_name, graph *graph)
arr.buf[0].size = result;
arr.buf[0].buf = buffer;
error res = load(&arr, tensorflow, cpu, graph);
error res = load(&arr, tensorflowlite, target, g);
fclose(pFile);
free(buffer);
@ -73,13 +73,13 @@ wasm_load(char *model_name, graph *graph)
}
error
wasm_init_execution_context(graph graph, graph_execution_context *ctx)
wasm_init_execution_context(graph g, graph_execution_context *ctx)
{
return init_execution_context(graph, ctx);
return init_execution_context(g, ctx);
}
error
wasm_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
wasm_set_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
{
tensor_dimensions dims;
dims.size = INPUT_TENSOR_DIMS;
@ -115,11 +115,12 @@ wasm_get_output(graph_execution_context ctx, uint32_t index, float *out_tensor,
// Inference
float *
run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
char *model_name, uint32_t num_output_tensors)
run_inference(execution_target target, float *input, uint32_t *input_size,
uint32_t *output_size, char *model_name,
uint32_t num_output_tensors)
{
graph graph;
if (wasm_load(model_name, &graph) != success) {
if (wasm_load(model_name, &graph, target) != success) {
fprintf(stderr, "Error when loading model.");
exit(1);
}
@ -130,7 +131,7 @@ run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
exit(1);
}
if (wasm_input(ctx, input, input_size) != success) {
if (wasm_set_input(ctx, input, input_size) != success) {
fprintf(stderr, "Error when setting input tensor.");
exit(1);
}
@ -151,7 +152,7 @@ run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
*output_size = MAX_OUTPUT_TENSOR_SIZE - *output_size;
if (wasm_get_output(ctx, i, &out_tensor[offset], output_size)
!= success) {
fprintf(stderr, "Error when getting input .");
fprintf(stderr, "Error when getting output .");
exit(1);
}
@ -185,14 +186,14 @@ create_input(int *dims)
// TESTS
void
test_sum()
test_sum(execution_target target)
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/sum.tflite", 1);
float *output = run_inference(target, input.input_tensor, input.dim,
&output_size, "/assets/models/sum.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 300.0) < EPSILON);
@ -203,14 +204,14 @@ test_sum()
}
void
test_max()
test_max(execution_target target)
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/max.tflite", 1);
float *output = run_inference(target, input.input_tensor, input.dim,
&output_size, "/assets/models/max.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 24.0) < EPSILON);
@ -222,14 +223,15 @@ test_max()
}
void
test_average()
test_average(execution_target target)
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/average.tflite", 1);
float *output =
run_inference(target, input.input_tensor, input.dim, &output_size,
"/assets/models/average.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 12.0) < EPSILON);
@ -241,14 +243,15 @@ test_average()
}
void
test_mult_dimensions()
test_mult_dimensions(execution_target target)
{
int dims[] = { 1, 3, 3, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/mult_dim.tflite", 1);
float *output =
run_inference(target, input.input_tensor, input.dim, &output_size,
"/assets/models/mult_dim.tflite", 1);
assert(output_size == 9);
for (int i = 0; i < 9; i++)
@ -260,14 +263,15 @@ test_mult_dimensions()
}
void
test_mult_outputs()
test_mult_outputs(execution_target target)
{
int dims[] = { 1, 4, 4, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/mult_out.tflite", 2);
float *output =
run_inference(target, input.input_tensor, input.dim, &output_size,
"/assets/models/mult_out.tflite", 2);
assert(output_size == 8);
// first tensor check
@ -285,16 +289,30 @@ test_mult_outputs()
int
main()
{
char *env = getenv("TARGET");
if (env == NULL) {
printf("Usage:\n--env=\"TARGET=[cpu|gpu]\"\n");
return 1;
}
execution_target target;
if (strcmp(env, "cpu") == 0)
target = cpu;
else if (strcmp(env, "gpu") == 0)
target = gpu;
else {
printf("Wrong target!");
return 1;
}
printf("################### Testing sum...\n");
test_sum();
test_sum(target);
printf("################### Testing max...\n");
test_max();
test_max(target);
printf("################### Testing average...\n");
test_average();
test_average(target);
printf("################### Testing multiple dimensions...\n");
test_mult_dimensions();
test_mult_dimensions(target);
printf("################### Testing multiple outputs...\n");
test_mult_outputs();
test_mult_outputs(target);
printf("Tests: passed!\n");
return 0;

View File

@ -5,6 +5,15 @@ set (WASI_NN_DIR ${CMAKE_CURRENT_LIST_DIR})
add_definitions (-DWASM_ENABLE_WASI_NN=1)
set (LIBC_WASI_NN_SOURCE ${WASI_NN_DIR}/wasi_nn_native.c ${WASI_NN_DIR}/wasi_nn_tensorflow.cpp)
include_directories (${WASI_NN_DIR})
include_directories (${WASI_NN_DIR}/src)
include_directories (${WASI_NN_DIR}/src/utils)
set (
LIBC_WASI_NN_SOURCE
${WASI_NN_DIR}/src/wasi_nn.c
${WASI_NN_DIR}/src/wasi_nn_tensorflowlite.cpp
${WASI_NN_DIR}/src/utils/wasi_nn_app_native.c
)
set (TENSORFLOW_LIB tensorflow-lite)

View File

@ -3,63 +3,17 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_WASM_H
#define WASI_NN_WASM_H
#include "wasi_nn_common.h"
/**
* Following definition from:
* [Aug 10th, 2022]
* https://github.com/WebAssembly/wasi-nn/blob/e5e1a6c31f424c7cd63026cd270e9746775675a0/wasi-nn.wit.md
* [Oct 25th, 2022]
* https://github.com/WebAssembly/wasi-nn/blob/0f77c48ec195748990ff67928a4b3eef5f16c2de/wasi-nn.wit.md
*/
/* The graph initialization data. */
#ifndef WASI_NN_H
#define WASI_NN_H
// This consists of an array of buffers because implementing backends may encode
// their graph IR in parts (e.g., OpenVINO stores its IR and weights
// separately).
typedef struct {
uint8_t *buf;
uint32_t size;
} graph_builder;
typedef struct {
graph_builder *buf;
uint32_t size;
} graph_builder_array;
/* The dimensions of a tensor. */
// The array length matches the tensor rank and each element in the array
// describes the size of each dimension.
typedef struct {
uint32_t *buf;
uint32_t size;
} tensor_dimensions;
/* The tensor data. */
// Initially conceived as a sparse representation, each empty cell would be
// filled with zeros and the array length must match the product of all of the
// dimensions and the number of bytes in the type (e.g., a 2x2 tensor with
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
typedef uint8_t *tensor_data;
/* A tensor. */
typedef struct {
// Describe the size of the tensor (e.g., 2x2x2x2 -> [2, 2, 2, 2]). To
// represent a tensor containing a single value, use `[1]` for the tensor
// dimensions.
tensor_dimensions *dimensions;
// Describe the type of element in the tensor (e.g., f32).
tensor_type type;
// Contains the tensor data.
tensor_data data;
} tensor;
#include <stdint.h>
#include "wasi_nn_types.h"
/**
* @brief Load an opaque sequence of bytes to use for inference.
@ -67,25 +21,31 @@ typedef struct {
* @param builder Model builder.
* @param encoding Model encoding.
* @param target Execution target.
* @param graph Graph.
* @param g Graph.
* @return error Execution status.
*/
error
load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *graph)
__attribute__((export_module("wasi_nn")))
execution_target target, graph *g)
__attribute__((import_module("wasi_nn")));
/**
* INFERENCE
*
*/
// Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t graph_execution_context;
/**
* @brief Create an execution instance of a loaded graph.
*
* @param graph Graph.
* @param g Graph.
* @param ctx Execution context.
* @return error Execution status.
*/
error
init_execution_context(graph graph, graph_execution_context *ctx)
__attribute__((export_module("wasi_nn")))
init_execution_context(graph g, graph_execution_context *ctx)
__attribute__((import_module("wasi_nn")));
/**
@ -98,7 +58,6 @@ init_execution_context(graph graph, graph_execution_context *ctx)
*/
error
set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
/**
@ -108,8 +67,7 @@ set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
* @return error Execution status.
*/
error
compute(graph_execution_context ctx) __attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
compute(graph_execution_context ctx) __attribute__((import_module("wasi_nn")));
/**
* @brief Extract the outputs after inference.
@ -126,7 +84,6 @@ __attribute__((import_module("wasi_nn")));
error
get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
#endif
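/*
 * A rough sketch of the call sequence this header implies, as seen from a
 * WASM application. It is illustrative only: the helper name, the single-part
 * tensorflowlite model buffer and the 1 x N fp32 input shape are assumptions,
 * and error handling is reduced to early returns.
 */
#include <stdint.h>
#include "wasi_nn.h"

static error
run_single_inference(uint8_t *model_buf, uint32_t model_size, float *input,
                     uint32_t input_len, float *output, uint32_t *output_size)
{
    /* One-part model: backends like TFLite take a single flat buffer */
    graph_builder builder = { .buf = model_buf, .size = model_size };
    graph_builder_array builder_array = { .buf = &builder, .size = 1 };

    graph g;
    error err = load(&builder_array, tensorflowlite, cpu, &g);
    if (err != success)
        return err;

    graph_execution_context ctx;
    if ((err = init_execution_context(g, &ctx)) != success)
        return err;

    /* Describe the input as a 1 x input_len fp32 tensor */
    uint32_t dims_buf[2] = { 1, input_len };
    tensor_dimensions dims = { .buf = dims_buf, .size = 2 };
    tensor t = { .dimensions = &dims, .type = fp32,
                 .data = (tensor_data)input };
    if ((err = set_input(ctx, 0, &t)) != success)
        return err;

    if ((err = compute(ctx)) != success)
        return err;

    /* output_size is updated by the runtime with the size of the output */
    return get_output(ctx, 0, (tensor_data)output, output_size);
}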

View File

@ -1,44 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_COMMON_H
#define WASI_NN_COMMON_H
#include <stdint.h>
// The type of the elements in a tensor.
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
// Describes the encoding of the graph. This allows the API to be implemented by
// various backends that encode (i.e., serialize) their graph IR with different
// formats.
typedef enum { openvino = 0, onnx, tensorflow, pytorch } graph_encoding;
// Define where the graph should be executed.
typedef enum { cpu = 0, gpu, tpu } execution_target;
// Error codes returned by functions in this API.
typedef enum {
// No error occurred.
success = 0,
// Caller module passed an invalid argument.
invalid_argument,
// Invalid encoding.
invalid_encoding,
// Caller module is missing a memory export.
missing_memory,
// Device or resource busy.
busy,
// Runtime Error.
runtime_error,
} error;
// An execution graph for performing inference (i.e., a model).
typedef uint32_t graph;
// Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t graph_execution_context;
#endif

View File

@ -1,264 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include "wasi_nn_common.h"
#include "wasm_export.h"
#include "bh_platform.h"
#include "wasi_nn.h"
#include "wasi_nn_tensorflow.hpp"
#include "logger.h"
/* Definition of 'wasi_nn.h' structs in WASM app format (using offset) */
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_array_wasm;
typedef struct {
uint32_t dimensions_offset;
tensor_type type;
uint32_t data_offset;
} tensor_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} tensor_dimensions_wasm;
/* Global variables */
static uint8_t _is_initialized;
static graph_encoding _encoding;
/* Utils */
static error
check_initialized()
{
if (!_is_initialized) {
NN_ERR_PRINTF("Model not initialized.");
return invalid_argument;
}
if (_encoding != tensorflow) {
NN_ERR_PRINTF("Model encoding is not tensorflow.");
return invalid_argument;
}
return success;
}
/* WASI-NN implementation */
error
wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *graph)
{
NN_DBG_PRINTF("Running wasi_nn_load [encoding=%d, target=%d]...", encoding,
target);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
if (!wasm_runtime_validate_native_addr(instance, builder,
sizeof(graph_builder_array_wasm)))
return invalid_argument;
if (!wasm_runtime_validate_app_addr(instance, builder->buf_offset,
builder->size * sizeof(uint32_t)))
return invalid_argument;
NN_DBG_PRINTF("Graph builder array contains %d elements", builder->size);
graph_builder_wasm *gb_wasm =
(graph_builder_wasm *)wasm_runtime_addr_app_to_native(
instance, builder->buf_offset);
graph_builder *gb_native = (graph_builder *)wasm_runtime_malloc(
builder->size * sizeof(graph_builder));
if (gb_native == NULL)
return missing_memory;
for (int i = 0; i < builder->size; ++i) {
if (!wasm_runtime_validate_app_addr(instance, gb_wasm[i].buf_offset,
gb_wasm[i].size
* sizeof(uint8_t))) {
wasm_runtime_free(gb_native);
return invalid_argument;
}
gb_native[i].buf = (uint8_t *)wasm_runtime_addr_app_to_native(
instance, gb_wasm[i].buf_offset);
gb_native[i].size = gb_wasm[i].size;
NN_DBG_PRINTF("Graph builder %d contains %d elements", i,
gb_wasm[i].size);
}
graph_builder_array gba_native = { .buf = gb_native,
.size = builder->size };
if (!wasm_runtime_validate_native_addr(instance, graph, sizeof(graph))) {
wasm_runtime_free(gb_native);
return invalid_argument;
}
switch (encoding) {
case tensorflow:
break;
default:
NN_ERR_PRINTF("Only tensorflow is supported.");
wasm_runtime_free(gb_native);
return invalid_argument;
}
_encoding = encoding;
_is_initialized = 1;
error res = tensorflow_load(gba_native, _encoding, target, graph);
NN_DBG_PRINTF("wasi_nn_load finished with status %d [graph=%d]", res,
*graph);
wasm_runtime_free(gb_native);
return res;
}
error
wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph graph,
graph_execution_context *ctx)
{
NN_DBG_PRINTF("Running wasi_nn_init_execution_context [graph=%d]...",
graph);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_init_execution_context(graph);
*ctx = graph;
NN_DBG_PRINTF(
"wasi_nn_init_execution_context finished with status %d [ctx=%d]", res,
*ctx);
return res;
}
error
wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_wasm *input_tensor)
{
NN_DBG_PRINTF("Running wasi_nn_set_input [ctx=%d, index=%d]...", ctx,
index);
error res;
if (success != (res = check_initialized()))
return res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
if (!wasm_runtime_validate_native_addr(instance, input_tensor,
sizeof(tensor_wasm)))
return invalid_argument;
if (!wasm_runtime_validate_app_addr(
instance, input_tensor->dimensions_offset, sizeof(uint32_t)))
return invalid_argument;
tensor_dimensions_wasm *dimensions_w =
(tensor_dimensions_wasm *)wasm_runtime_addr_app_to_native(
instance, input_tensor->dimensions_offset);
if (!wasm_runtime_validate_app_addr(instance, dimensions_w->buf_offset,
dimensions_w->size * sizeof(uint32_t)))
return invalid_argument;
tensor_dimensions dimensions = {
.buf = (uint32_t *)wasm_runtime_addr_app_to_native(
instance, dimensions_w->buf_offset),
.size = dimensions_w->size
};
NN_DBG_PRINTF("Number of dimensions: %d", dimensions.size);
int total_elements = 1;
for (int i = 0; i < dimensions.size; ++i) {
NN_DBG_PRINTF("Dimension %d: %d", i, dimensions.buf[i]);
total_elements *= dimensions.buf[i];
}
NN_DBG_PRINTF("Tensor type: %d", input_tensor->type);
if (!wasm_runtime_validate_app_addr(instance, input_tensor->data_offset,
total_elements))
return invalid_argument;
tensor tensor = { .type = input_tensor->type,
.dimensions = &dimensions,
.data = (uint8_t *)wasm_runtime_addr_app_to_native(
instance, input_tensor->data_offset) };
res = tensorflow_set_input(ctx, index, &tensor);
NN_DBG_PRINTF("wasi_nn_set_input finished with status %d", res);
return res;
}
error
wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
{
NN_DBG_PRINTF("Running wasi_nn_compute [ctx=%d]...", ctx);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_compute(ctx);
NN_DBG_PRINTF("wasi_nn_compute finished with status %d", res);
return res;
}
error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
{
NN_DBG_PRINTF("Running wasi_nn_get_output [ctx=%d, index=%d]...", ctx,
index);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_get_output(ctx, index, output_tensor, output_tensor_size);
NN_DBG_PRINTF("wasi_nn_get_output finished with status %d [data_size=%d]",
res, *output_tensor_size);
return res;
}
/* Register WASI-NN in WAMR */
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
{ #func_name, wasi_nn_##func_name, signature, NULL }
/* clang-format on */
static NativeSymbol native_symbols_wasi_nn[] = {
REG_NATIVE_FUNC(load, "(*ii*)i"),
REG_NATIVE_FUNC(init_execution_context, "(i*)i"),
REG_NATIVE_FUNC(set_input, "(ii*)i"),
REG_NATIVE_FUNC(compute, "(i)i"),
REG_NATIVE_FUNC(get_output, "(ii**)i"),
};
uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_libc_wasi_apis)
{
*p_libc_wasi_apis = native_symbols_wasi_nn;
return sizeof(native_symbols_wasi_nn) / sizeof(NativeSymbol);
}

View File

@ -1,40 +0,0 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TENSORFLOW_HPP
#define WASI_NN_TENSORFLOW_HPP
#include <stdio.h>
#include "wasi_nn.h"
#include "logger.h"
#ifdef __cplusplus
extern "C" {
#endif
error
tensorflow_load(graph_builder_array builder, graph_encoding encoding,
execution_target target, graph *graph);
error
tensorflow_init_execution_context(graph graph);
error
tensorflow_set_input(graph_execution_context ctx, uint32_t index,
tensor *input_tensor);
error
tensorflow_compute(graph_execution_context ctx);
error
tensorflow_get_output(graph_execution_context context, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,106 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TYPES_H
#define WASI_NN_TYPES_H
/**
* ERRORS
*
*/
// Error codes returned by functions in this API.
typedef enum {
// No error occurred.
success = 0,
// Caller module passed an invalid argument.
invalid_argument,
// Invalid encoding.
invalid_encoding,
// Caller module is missing a memory export.
missing_memory,
// Device or resource busy.
busy,
// Runtime Error.
runtime_error,
} error;
/**
* TENSOR
*
*/
// The dimensions of a tensor.
//
// The array length matches the tensor rank and each element in the array
// describes the size of each dimension.
typedef struct {
uint32_t *buf;
uint32_t size;
} tensor_dimensions;
// The type of the elements in a tensor.
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
// The tensor data.
//
// Initially conceived as a sparse representation, each empty cell would be
// filled with zeros and the array length must match the product of all of the
// dimensions and the number of bytes in the type (e.g., a 2x2 tensor with
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
typedef uint8_t *tensor_data;
// A tensor.
typedef struct {
// Describe the size of the tensor (e.g., 2x2x2x2 -> [2, 2, 2, 2]). To
// represent a tensor containing a single value, use `[1]` for the tensor
// dimensions.
tensor_dimensions *dimensions;
// Describe the type of element in the tensor (e.g., f32).
tensor_type type;
// Contains the tensor data.
tensor_data data;
} tensor;
/**
* GRAPH
*
*/
// The graph initialization data.
//
// This consists of an array of buffers because implementing backends may encode
// their graph IR in parts (e.g., OpenVINO stores its IR and weights
// separately).
typedef struct {
uint8_t *buf;
uint32_t size;
} graph_builder;
typedef struct {
graph_builder *buf;
uint32_t size;
} graph_builder_array;
// An execution graph for performing inference (i.e., a model).
typedef uint32_t graph;
// Describes the encoding of the graph. This allows the API to be implemented by
// various backends that encode (i.e., serialize) their graph IR with different
// formats.
typedef enum {
openvino = 0,
onnx,
tensorflow,
pytorch,
tensorflowlite
} graph_encoding;
// Define where the graph should be executed.
typedef enum execution_target { cpu = 0, gpu, tpu } execution_target;
#endif
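/*
 * A small illustrative helper (not part of the header) that makes the dense
 * layout described above concrete: the element count of a tensor is the
 * product of its dimensions, and for fp32 the data buffer must hold 4 bytes
 * per element, e.g. dims [2, 2] -> 4 elements -> 16 bytes.
 */
#include <stdint.h>
#include "wasi_nn_types.h"

static uint32_t
tensor_data_bytes_fp32(const tensor_dimensions *dims)
{
    uint32_t elements = 1;
    for (uint32_t i = 0; i < dims->size; i++)
        elements *= dims->buf[i];
    return elements * (uint32_t)sizeof(float);
}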

View File

@ -77,7 +77,7 @@ os_socket_bind(bh_socket_t socket, const char *host, int *port)
}
socklen = sizeof(addr);
if (getsockname(socket, (void *)&addr, &socklen) == -1) {
if (getsockname(socket, (struct sockaddr *)&addr, &socklen) == -1) {
goto fail;
}
@ -120,7 +120,7 @@ os_socket_accept(bh_socket_t server_sock, bh_socket_t *sock, void *addr,
unsigned int *addrlen)
{
struct sockaddr addr_tmp;
unsigned int len = sizeof(struct sockaddr);
socklen_t len = sizeof(struct sockaddr);
*sock = accept(server_sock, (struct sockaddr *)&addr_tmp, &len);
@ -205,7 +205,7 @@ os_socket_addr_remote(bh_socket_t socket, bh_sockaddr_t *sockaddr)
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
if (getpeername(socket, &addr, &addr_len) == -1) {
if (getpeername(socket, (struct sockaddr *)&addr, &addr_len) == -1) {
return BHT_ERROR;
}
@ -219,7 +219,7 @@ os_socket_addr_local(bh_socket_t socket, bh_sockaddr_t *sockaddr)
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
if (getsockname(socket, &addr, &addr_len) == -1) {
if (getsockname(socket, (struct sockaddr *)&addr, &addr_len) == -1) {
return BHT_ERROR;
}

View File

@ -16,6 +16,11 @@
#define SGX_ERROR_FILE_LOWEST_ERROR_ID SGX_ERROR_FILE_BAD_STATUS
#define SGX_ERROR_FILE_HIGHEST_ERROR_ID SGX_ERROR_FILE_CLOSE_FAILED
// Internal buffer filled with zeroes and used when extending the size of
// protected files.
#define ZEROES_PADDING_LENGTH (32 * 1024)
char zeroes_padding[ZEROES_PADDING_LENGTH] = { 0 };
// The mapping between file descriptors and IPFS file pointers.
static HashMap *ipfs_file_list;
@ -78,6 +83,27 @@ ipfs_file_destroy(void *sgx_file)
sgx_fclose(sgx_file);
}
// Writes a given number of zeroes in file at the current offset.
// The return value is zero if successful; otherwise non-zero.
static int
ipfs_write_zeroes(void *sgx_file, size_t len)
{
int min_count;
while (len > 0) {
min_count = len < ZEROES_PADDING_LENGTH ? len : ZEROES_PADDING_LENGTH;
if (sgx_fwrite(zeroes_padding, 1, min_count, sgx_file) == 0) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
return -1;
}
len -= min_count;
}
return 0;
}
int
ipfs_init()
{
@ -104,7 +130,7 @@ ipfs_posix_fallocate(int fd, off_t offset, size_t len)
// The wrapper for fseek takes care of extending the file if sought beyond
// the end
if (ipfs_lseek(fd, offset + len, SEEK_CUR) == -1) {
if (ipfs_lseek(fd, offset + len, SEEK_SET) == -1) {
return errno;
}
@ -354,7 +380,7 @@ ipfs_fflush(int fd)
off_t
ipfs_lseek(int fd, off_t offset, int nwhence)
{
off_t new_offset;
off_t cursor_current_location;
void *sgx_file = fd2file(fd);
if (!sgx_file) {
errno = EBADF;
@ -364,20 +390,20 @@ ipfs_lseek(int fd, off_t offset, int nwhence)
// Optimization: if the offset is 0 and the whence is SEEK_CUR,
// this is equivalent of a call to ftell.
if (offset == 0 && nwhence == SEEK_CUR) {
int64_t ftell_result = (off_t)sgx_ftell(sgx_file);
cursor_current_location = (off_t)sgx_ftell(sgx_file);
if (ftell_result == -1) {
if (cursor_current_location == -1) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
return -1;
}
return ftell_result;
return cursor_current_location;
}
int fseek_result = sgx_fseek(sgx_file, offset, nwhence);
if (fseek_result == 0) {
new_offset = (__wasi_filesize_t)sgx_ftell(sgx_file);
off_t new_offset = (off_t)sgx_ftell(sgx_file);
if (new_offset == -1) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
@ -405,17 +431,39 @@ ipfs_lseek(int fd, off_t offset, int nwhence)
// manually.
// Assume the error is raised because the cursor is moved beyond the end
// of the file. Try to move the cursor at the end of the file.
// of the file.
// If the whence is the current cursor location, retrieve it
if (nwhence == SEEK_CUR) {
cursor_current_location = (off_t)sgx_ftell(sgx_file);
}
// Move the cursor at the end of the file
if (sgx_fseek(sgx_file, 0, SEEK_END) == -1) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
return -1;
}
// Compute the number of zeroes to append.
int64_t number_of_zeroes;
switch (nwhence) {
case SEEK_SET:
number_of_zeroes = offset - sgx_ftell(sgx_file);
break;
case SEEK_END:
number_of_zeroes = offset;
break;
case SEEK_CUR:
number_of_zeroes =
cursor_current_location + offset - sgx_ftell(sgx_file);
break;
default:
errno = EINVAL;
return -1;
}
// Write the missing zeroes
char zero = 0;
int64_t number_of_zeroes = offset - sgx_ftell(sgx_file);
if (sgx_fwrite(&zero, 1, number_of_zeroes, sgx_file) == 0) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
if (ipfs_write_zeroes(sgx_file, number_of_zeroes) != 0) {
return -1;
}
@ -468,9 +516,7 @@ ipfs_ftruncate(int fd, off_t len)
// Increasing the size is equal to writing from the end of the file
// with null bytes.
char null_byte = 0;
if (sgx_fwrite(&null_byte, 1, len - file_size, sgx_file) == 0) {
errno = convert_sgx_errno(sgx_ferror(sgx_file));
if (ipfs_write_zeroes(sgx_file, len - file_size) != 0) {
return -1;
}

View File

@ -3,12 +3,12 @@
Prepare WASM building environments
==================================
For C and C++, WASI-SDK version 12.0+ is the major tool supported by WAMR to build WASM applications. Also, we can use [Emscripten SDK (EMSDK)](https://github.com/emscripten-core/emsdk), but it is not recommended. And there are some other compilers such as the standard clang compiler, which might also work [here](./other_wasm_compilers.md).
For C and C++, WASI-SDK version 19.0+ is the major tool supported by WAMR to build WASM applications. Alternatively, we can use the [Emscripten SDK (EMSDK)](https://github.com/emscripten-core/emsdk), but it is not recommended. Some other compilers, such as the standard clang compiler, might also work; see [here](./other_wasm_compilers.md).
To install WASI SDK, please download the [wasi-sdk release](https://github.com/CraneStation/wasi-sdk/releases) and extract the archive to default path `/opt/wasi-sdk`.
The official *wasi-sdk release* doesn't fully support *latest 128-bit SIMD spec* yet. WAMR provides a script in [build-wasi-sdk](../test-tools/build-wasi-sdk/) to generate
another wasi-sdk with *llvm-13* from source code and installs it at *../test-tools/wasi-sdk*. If you plan to build WASM applications with *latest 128-bit SIMD*, please use it instead of the official release.
another wasi-sdk with *llvm-15* from source code and installs it at *../test-tools/wasi-sdk*. If you plan to build WASM applications with *latest 128-bit SIMD*, please use it instead of the official release.
And [sample workloads](../samples/workload) are using the self-compiled wasi-sdk.
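As a quick sanity check of the toolchain (the file name and flags below are only illustrative), a trivial C source can be compiled to WASM directly with the SDK's clang and executed with iwasm:

```
/* hello.c -- compile with, for example:
 *   /opt/wasi-sdk/bin/clang -O3 -o hello.wasm hello.c
 * then run with:
 *   iwasm hello.wasm
 */
#include <stdio.h>

int
main(void)
{
    printf("Hello from WASM!\n");
    return 0;
}
```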

View File

@ -10,9 +10,13 @@ This document describes how to port WAMR to a new platform "**new-os**"
# Step 1: Implement platform API layer
-------------------------
Firstly create the folder **`core/shared/platform/new-os`** for platform API layer implementations. In the folder you just created, you must provide the following files:
Firstly create the folder for platform API layer implementations:
* for common platforms, create a folder in **`core/shared/platform/new-os`** in WAMR repository folder, so the implementation can be upstreamed
* for platforms that are internal and whose implementation shouldn't be published, it's recommended to create a folder outside of the WAMR repository (e.g. keep a separate repository for the platform API layer implementation)
- `platform_internal.h`: It can be used for any platform specific definitions such as macros, data types and internal APIs.
In the folder you just created, you must provide the following files:
- `platform_internal.h`: It can be used for any platform-specific definitions such as macros, data types and internal APIs.
- `shared_platform.cmake`: the cmake file will be included by the building script. It is recommended to add a definition for your platform:
@ -20,16 +24,16 @@ Firstly create the folder **`core/shared/platform/new-os`** for platform API lay
add_definitions(-DBH_PLATFORM_YOUR_NAME)
```
Then go to implement the APIs defined in following header files for the platform abstraction layer:
Then go to implement the APIs defined in the following header files for the platform abstraction layer:
- [`platform_api_vmcore.h`](../core/shared/platform/include/platform_api_vmcore.h): mandatory for building mini-product (vmcore only). Part of APIs are needed only for Ahead of Time compilation support.
- [`platform_api_vmcore.h`](../core/shared/platform/include/platform_api_vmcore.h): mandatory for building mini-product (vmcore only). Some of the APIs are needed only for Ahead-of-Time compilation support.
- [`platform_api_extension.h`](../core/shared/platform/include/platform_api_extension.h): mandatory for app-mgr and app-framework. Given that the app-mgr and app-framework are not required for your target platform, you won't have to implement the API defined in the `platform_api_extension.h`.
**common/posix:**
There is posix based implementation of the platform API located in the `platform/common/posix` folder. You can include it if your platform support posix API. refer to platform linux implementation.
There is a posix-based implementation of the platform API located in the `platform/common/posix` folder. You can include it if your platform supports the posix API; refer to the Linux platform implementation.
@ -42,7 +46,7 @@ Some platforms such as ZephyrOS don't provide math functions e.g. sqrt, fabs and
# Step 2: Create the mini product for the platform
-------------------------
You can build a mini WAMR product which is only the vmcore for you platform. Normally you need to implement the main function which loads a WASM file and run it with the WASM runtime. You don't have to do this step if there is no mini-product need for your platform porting.
You can build a mini WAMR product which is only the vmcore for your platform. Normally you need to implement the main function which loads a WASM file and runs it with the WASM runtime. You don't have to do this step if no mini-product is needed for your platform porting.
@ -58,6 +62,11 @@ cd build
cmake .. -DWAMR_BUILD_PLATFORM=new-os
```
For platform implementations that are outside of the WAMR repository (e.g. internal platforms), you also need to provide `SHARED_PLATFORM_CONFIG` path:
```
cmake .. -DWAMR_BUILD_PLATFORM=new-os -DSHARED_PLATFORM_CONFIG=/path/to/new-os/shared_platform.cmake
```
Refer to [build_wamr.md](./build_wamr.md) for the building configurations and parameters.
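As a very rough sketch of such a main flow (buffer loading, stack/heap sizes, and error handling are placeholders to be adapted to the new platform), the embedder API from `wasm_export.h` can be driven like this:

```
#include "wasm_export.h"

/* Assumes platform-specific code has already read a .wasm file into
   wasm_buf / wasm_size. Stack and heap sizes are placeholder values. */
static int
run_wasm(uint8_t *wasm_buf, uint32_t wasm_size)
{
    char error_buf[128];
    wasm_module_t module = NULL;
    wasm_module_inst_t inst = NULL;
    int ret = -1;

    if (!wasm_runtime_init())
        return -1;

    module = wasm_runtime_load(wasm_buf, wasm_size, error_buf,
                               sizeof(error_buf));
    if (!module)
        goto fail;

    inst = wasm_runtime_instantiate(module, 8 * 1024 /* stack */,
                                    8 * 1024 /* heap */, error_buf,
                                    sizeof(error_buf));
    if (!inst)
        goto fail;

    /* Executes the module's main()/_start entry point */
    if (wasm_application_execute_main(inst, 0, NULL))
        ret = 0;

fail:
    if (inst)
        wasm_runtime_deinstantiate(inst);
    if (module)
        wasm_runtime_unload(module);
    wasm_runtime_destroy();
    return ret;
}
```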

View File

@ -0,0 +1 @@
include src/wamr/libs/*

View File

@ -1,31 +1,34 @@
# wamr-python
The WAMR Python package contains a set of high-level bindings for WAMR API and WASM-C-API.
## Installation
### Installing from the source code
Installing from local source tree is in _development mode_. The package appears to be installed but still is editable from the source tree.
To install from the local source tree in _development mode_, run the following command,
```bash
$ python -m pip install -e /path/to/wamr-root/binding/python
python -m pip install -e .
```
In this mode the package appears to be installed but is still editable from the source tree.
## Usage
```python
import wamr.ffi as ffi
From the same package you can use two sets of APIs.
To use the WAMR API you can import the symbols as follows,
```py
from wamr.wamrapi.wamr import Engine, Module, Instance, ExecEnv
```
### Preparation
On the other hand, to use the WASM-C-API,
The binding will load the shared library _libiwasm.so_ from the WAMR repo. So before running the binding, you need to build the library yourself.
```py
import wamr.wasmcapi.ffi as ffi
```
The default compile options are good enough.
For more information:
Please be aware that `wasm_frame_xxx` and `wasm_trap_xxx` only work well when enabling `WAMR_BUILD_DUMP_CALL_STACK`.
### Examples
There is a [simple example](./samples/hello_procedural.py) to show how to use bindings. Actually, the python binding follows C-APIs. There it should be easy if be familiar with _programming with wasm-c-api_.
Unit test cases under _./tests_ could be another but more complete references.
* [WAMR API](./wamr_api)
* [WASM-C-API](./wasm_c_api)

View File

@ -8,7 +8,28 @@
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
from setuptools import setup, find_packages
import pathlib
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
from subprocess import check_call
def build_library():
cur_path = pathlib.Path(__file__).parent
check_call(f"{cur_path}/utils/create_lib.sh".split())
class PreDevelopCommand(develop):
"""Pre-installation for development mode."""
def run(self):
build_library()
develop.run(self)
class PreInstallCommand(install):
"""Pre-installation for installation mode."""
def run(self):
build_library()
install.run(self)
with open("README.md") as f:
@ -24,7 +45,11 @@ setup(
long_description=readme,
author="The WAMR Project Developers",
author_email="hello@bytecodealliance.org",
url="https://github.com/bytecodealliance/wamr-python",
url="https://github.com/bytecodealliance/wasm-micro-runtime",
license=license,
packages=["wamr"],
include_package_data=True,
cmdclass={
'develop': PreDevelopCommand,
'install': PreInstallCommand,
},
)

File diff suppressed because it is too large

View File

@ -0,0 +1,123 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from ctypes import Array
from ctypes import c_char
from ctypes import c_uint
from ctypes import c_uint8
from ctypes import c_void_p
from ctypes import cast
from ctypes import create_string_buffer
from ctypes import POINTER
from ctypes import pointer
from wamr.wamrapi.iwasm import Alloc_With_Pool
from wamr.wamrapi.iwasm import RuntimeInitArgs
from wamr.wamrapi.iwasm import wasm_exec_env_t
from wamr.wamrapi.iwasm import wasm_function_inst_t
from wamr.wamrapi.iwasm import wasm_module_inst_t
from wamr.wamrapi.iwasm import wasm_module_t
from wamr.wamrapi.iwasm import wasm_runtime_call_wasm
from wamr.wamrapi.iwasm import wasm_runtime_create_exec_env
from wamr.wamrapi.iwasm import wasm_runtime_deinstantiate
from wamr.wamrapi.iwasm import wasm_runtime_destroy
from wamr.wamrapi.iwasm import wasm_runtime_destroy_exec_env
from wamr.wamrapi.iwasm import wasm_runtime_full_init
from wamr.wamrapi.iwasm import wasm_runtime_instantiate
from wamr.wamrapi.iwasm import wasm_runtime_load
from wamr.wamrapi.iwasm import wasm_runtime_lookup_function
from wamr.wamrapi.iwasm import wasm_runtime_unload
class Engine:
def __init__(self):
self.init_args = self._get_init_args()
wasm_runtime_full_init(pointer(self.init_args))
def __del__(self):
print("deleting Engine")
wasm_runtime_destroy()
def _get_init_args(self, heap_size: int = 1024 * 512) -> RuntimeInitArgs:
init_args = RuntimeInitArgs()
init_args.mem_alloc_type = Alloc_With_Pool
init_args.mem_alloc_option.pool.heap_buf = cast(
(c_char * heap_size)(), c_void_p
)
init_args.mem_alloc_option.pool.heap_size = heap_size
return init_args
class Module:
__create_key = object()
@classmethod
def from_file(cls, engine: Engine, fp: str) -> "Module":
return Module(cls.__create_key, engine, fp)
def __init__(self, create_key: object, engine: Engine, fp: str) -> None:
assert (
create_key == Module.__create_key
), "Module objects must be created using Module.from_file"
self.engine = engine
self.module, self.file_data = self._create_module(fp)
def __del__(self):
print("deleting Module")
wasm_runtime_unload(self.module)
    def _create_module(self, fp: str) -> tuple[wasm_module_t, Array[c_uint8]]:
with open(fp, "rb") as f:
data = f.read()
data = (c_uint8 * len(data))(*data)
error_buf = create_string_buffer(128)
module = wasm_runtime_load(data, len(data), error_buf, len(error_buf))
if not module:
raise Exception("Error while creating module")
return module, data
class Instance:
def __init__(self, module: Module, stack_size: int = 65536, heap_size: int = 16384):
self.module = module
self.module_inst = self._create_module_inst(module, stack_size, heap_size)
def __del__(self):
print("deleting Instance")
wasm_runtime_deinstantiate(self.module_inst)
def lookup_function(self, name: str):
func = wasm_runtime_lookup_function(self.module_inst, name, None)
if not func:
raise Exception("Error while looking-up function")
return func
def _create_module_inst(self, module: Module, stack_size: int, heap_size: int) -> wasm_module_inst_t:
error_buf = create_string_buffer(128)
module_inst = wasm_runtime_instantiate(
module.module, stack_size, heap_size, error_buf, len(error_buf)
)
if not module_inst:
raise Exception("Error while creating module instance")
return module_inst
class ExecEnv:
def __init__(self, module_inst: Instance, stack_size: int = 65536):
self.module_inst = module_inst
self.exec_env = self._create_exec_env(module_inst, stack_size)
def __del__(self):
print("deleting ExecEnv")
wasm_runtime_destroy_exec_env(self.exec_env)
def call(self, func: wasm_function_inst_t, argc: int, argv: "POINTER[c_uint]"):
if not wasm_runtime_call_wasm(self.exec_env, func, argc, argv):
raise Exception("Error while calling function")
def _create_exec_env(self, module_inst: Instance, stack_size: int) -> wasm_exec_env_t:
exec_env = wasm_runtime_create_exec_env(module_inst.module_inst, stack_size)
if not exec_env:
raise Exception("Error while creating execution environment")
return exec_env

View File

@ -36,8 +36,8 @@ current_file = Path(__file__)
if current_file.is_symlink():
current_file = Path(os.readlink(current_file))
current_dir = current_file.parent.resolve()
root_dir = current_dir.parent.parent.parent.parent.resolve()
wamr_dir = root_dir.joinpath("wasm-micro-runtime").resolve()
root_dir = current_dir.parents[4].resolve()
wamr_dir = root_dir.resolve()
if not wamr_dir.exists():
raise RuntimeError(f"not found the repo of wasm-micro-runtime under {root_dir}")

View File

@ -0,0 +1,17 @@
#!/bin/sh
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
CUR_DIR=$(cd $(dirname $0) && pwd -P)
ROOT_DIR=${CUR_DIR}/../../..
WAMR_BUILD_PLATFORM=${WAMR_BUILD_PLATFORM:-"linux"}
cd ${ROOT_DIR}/product-mini/platforms/${WAMR_BUILD_PLATFORM}
mkdir -p build && cd build
cmake ..
make -j
cp libiwasm.so ${CUR_DIR}/../src/wamr/libs

View File

@ -0,0 +1,25 @@
# WAMR API
## Examples
Copy the `libiwasm` library generated from `product-mini/platforms` into `language-bindings/python/wamr/libs`.
There is a [simple example](./samples/main.py) that shows how to use the bindings.
```
python samples/main.py
```
## Update WAMR API bindings
Install requirements,
```
pip install -r requirements.txt
```
Run the following command,
```sh
ctypesgen ../../../../core/iwasm/include/wasm_export.h -l ../libs/libiwasm.so -o iwasm.py
```

View File

@ -0,0 +1 @@
ctypesgen==1.1.1

View File

@ -0,0 +1,11 @@
#!/bin/sh
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
/opt/wasi-sdk/bin/clang \
-O0 -z stack-size=4096 -Wl,--initial-memory=65536 \
-Wl,--strip-all,--no-entry -nostdlib \
-Wl,--export=sum\
-Wl,--allow-undefined \
-o test.wasm sum.c

View File

@ -0,0 +1,22 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from wamr.wamrapi.wamr import Engine, Module, Instance, ExecEnv
from ctypes import c_uint
import pathlib
def main():
engine = Engine()
module = Module.from_file(engine, pathlib.Path(__file__).parent / "sum.wasm")
module_inst = Instance(module)
exec_env = ExecEnv(module_inst)
func = module_inst.lookup_function("sum")
argv = (c_uint * 2)(*[10, 11])
exec_env.call(func, len(argv), argv)
print(argv[0])
if __name__ == "__main__":
main()

View File

@ -0,0 +1,12 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
int
sum(int a, int b)
{
return a + b;
}

View File

@ -0,0 +1,7 @@
# WASM-C-API
## Examples
There is a [simple example](./samples/hello_procedural.py) that shows how to use the bindings. The Python binding follows the C API, so it should be easy to pick up if you are familiar with _programming with wasm-c-api_.
Unit test cases under _./tests_ can serve as another, more complete reference.

Some files were not shown because too many files have changed in this diff