Integrate WASI-NN into WAMR (#1521)

Initial integration of WASI-NN based on #1225:
- Implement the library core/iwasm/libraries/wasi-nn
- Support TensorFlow, CPU, F32 at the first stage
- Add cmake variable `-DWAMR_BUILD_WASI_NN`
- Add test case based on Docker image and update document

Refer to #1573
tonibofarull 2022-10-12 06:09:29 +02:00 committed by GitHub
parent 78c38d088e
commit e53ab91439
25 changed files with 1459 additions and 0 deletions

.gitignore

@@ -1,6 +1,7 @@
.cache
.vs
.vscode
.venv
/.idea
**/cmake-build-*/
**/*build/

build-scripts/config_common.cmake

@@ -291,3 +291,6 @@ if (WAMR_BUILD_SGX_IPFS EQUAL 1)
add_definitions (-DWASM_ENABLE_SGX_IPFS=1)
message (" SGX IPFS enabled")
endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
message (" WASI-NN enabled")
endif ()

build-scripts/runtime_lib.cmake

@@ -91,6 +91,19 @@ if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
set (WAMR_BUILD_LIB_PTHREAD 1)
endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
execute_process(COMMAND ${WAMR_ROOT_DIR}/core/deps/install_tensorflow.sh
RESULT_VARIABLE TENSORFLOW_RESULT
)
set(TENSORFLOW_SOURCE_DIR "${WAMR_ROOT_DIR}/core/deps/tensorflow-src")
include_directories (${CMAKE_CURRENT_BINARY_DIR}/flatbuffers/include)
include_directories (${TENSORFLOW_SOURCE_DIR})
add_subdirectory(
"${TENSORFLOW_SOURCE_DIR}/tensorflow/lite"
"${CMAKE_CURRENT_BINARY_DIR}/tensorflow-lite" EXCLUDE_FROM_ALL)
include (${IWASM_DIR}/libraries/wasi-nn/wasi_nn.cmake)
endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)
include (${IWASM_DIR}/libraries/lib-pthread/lib_pthread.cmake)
# Enable the dependent feature if lib pthread is enabled
@@ -152,6 +165,7 @@ set (source_all
${UTILS_SHARED_SOURCE}
${LIBC_BUILTIN_SOURCE}
${LIBC_WASI_SOURCE}
${LIBC_WASI_NN_SOURCE}
${IWASM_COMMON_SOURCE}
${IWASM_INTERP_SOURCE}
${IWASM_AOT_SOURCE}

core/deps/install_tensorflow.sh (new executable file)

@@ -0,0 +1,11 @@
#!/bin/sh
DEPS_ROOT=$(cd "$(dirname "$0")/" && pwd)
cd ${DEPS_ROOT}
echo "Downloading tensorflow in ${PWD}..."
git clone https://github.com/tensorflow/tensorflow.git tensorflow-src \
--branch v2.9.2
exit 0

core/iwasm/common/wasm_native.c

@@ -33,6 +33,9 @@ get_spectest_export_apis(NativeSymbol **p_libc_builtin_apis);
uint32
get_libc_wasi_export_apis(NativeSymbol **p_libc_wasi_apis);
uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_libc_wasi_apis);
uint32
get_base_lib_export_apis(NativeSymbol **p_base_lib_apis);
@@ -425,6 +428,13 @@ wasm_native_init()
goto fail;
#endif /* WASM_ENABLE_LIB_RATS */
#if WASM_ENABLE_WASI_NN != 0
n_native_symbols = get_wasi_nn_export_apis(&native_symbols);
if (!wasm_native_register_natives("wasi_nn", native_symbols,
n_native_symbols))
return false;
#endif
return true;
fail:
wasm_native_destroy();

.dockerignore

@@ -0,0 +1 @@
**/Dockerfile

core/iwasm/libraries/wasi-nn/README.md

@@ -0,0 +1,43 @@
# WASI-NN
## How to use
Enable WASI-NN in WAMR by specifying it in the cmake build configuration as follows:
```
set (WAMR_BUILD_WASI_NN 1)
```
The functions provided by WASI-NN are defined in the header file `core/iwasm/libraries/wasi-nn/wasi_nn.h`.
Simply including this file in your WASM application binds WASI-NN to your module.
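For illustration, the sketch below shows how a WASM application might drive this API end to end; the helper name, buffers, and error handling are hypothetical, not part of this commit.
```
#include <stdint.h>
#include "wasi_nn.h"

/* Hypothetical sketch: run one inference on a model blob already in memory.
   On input, *output_size is the capacity of `output`; on return it holds
   the copied size (see the `get_output` documentation in wasi_nn.h). */
error
run_once(uint8_t *model_buf, uint32_t model_size, float *input,
         uint32_t *dims, uint32_t rank, float *output, uint32_t *output_size)
{
    graph g;
    graph_builder builder = { .buf = model_buf, .size = model_size };
    graph_builder_array builder_arr = { .buf = &builder, .size = 1 };
    error err = load(&builder_arr, tensorflow, cpu, &g);
    if (err != success)
        return err;

    graph_execution_context ctx;
    if ((err = init_execution_context(g, &ctx)) != success)
        return err;

    tensor_dimensions t_dims = { .buf = dims, .size = rank };
    tensor t = { .dimensions = &t_dims, .type = fp32,
                 .data = (uint8_t *)input };
    if ((err = set_input(ctx, 0, &t)) != success)
        return err;

    if ((err = compute(ctx)) != success)
        return err;

    return get_output(ctx, 0, (uint8_t *)output, output_size);
}
```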
## Tests
To run the tests, we assume that the current directory is the root of the repository.
1. Build the docker image,
```
docker build -t wasi-nn -f core/iwasm/libraries/wasi-nn/test/Dockerfile .
```
2. Run the container,
```
docker run wasi-nn
```
If all the tests ran properly, you will see the following message in the terminal:
```
Tests: passed!
```
## What is missing
* Only 1 model at a time is supported.
* `graph` and `graph-execution-context` are ignored.
* Only `tensorflow` (lite) is supported.
* Only `cpu` is supported.

core/iwasm/libraries/wasi-nn/logger.h

@@ -0,0 +1,55 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_LOGGER_H
#define WASI_NN_LOGGER_H
#include <stdio.h>
#include <string.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/* Disable a level by removing the define */
#define ENABLE_ERR_LOG
#define ENABLE_WARN_LOG
#define ENABLE_DBG_LOG
#define ENABLE_INFO_LOG
// Definition of the levels
#ifdef ENABLE_ERR_LOG
#define NN_ERR_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_ERR_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_WARN_LOG
#define NN_WARN_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_WARN_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_DBG_LOG
#define NN_DBG_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_DBG_PRINTF(fmt, ...)
#endif
#ifdef ENABLE_INFO_LOG
#define NN_INFO_PRINTF(fmt, ...) \
printf("[%s:%d] " fmt, __FILENAME__, __LINE__, ##__VA_ARGS__); \
printf("\n"); \
fflush(stdout)
#else
#define NN_INFO_PRINTF(fmt, ...)
#endif
#endif
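As a usage note, these are printf-style macros that expand to multiple statements, so a hypothetical caller (this sketch is not part of the commit) should brace conditional uses:
```
#include "logger.h"

/* Hypothetical usage sketch: each macro prefixes the message with
   "[file:line]" and appends a newline. */
static void
log_model_info(int num_tensors)
{
    NN_INFO_PRINTF("Model loaded with %d tensors", num_tensors);
    if (num_tensors == 0) {
        /* Braces required: the macro expands to several statements. */
        NN_WARN_PRINTF("Model has no tensors");
    }
}
```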

core/iwasm/libraries/wasi-nn/test/CMakeLists.txt

@@ -0,0 +1,178 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required (VERSION 2.9)
project (iwasm)
set (CMAKE_VERBOSE_MAKEFILE OFF)
# Reset default linker flags
set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set (CMAKE_C_STANDARD 99)
set (CMAKE_CXX_STANDARD 14)
if (NOT DEFINED WAMR_BUILD_PLATFORM)
set (WAMR_BUILD_PLATFORM "linux")
endif ()
# Set WAMR_BUILD_TARGET, currently values supported:
# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",
# "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]"
if (NOT DEFINED WAMR_BUILD_TARGET)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm64|aarch64)")
set (WAMR_BUILD_TARGET "AARCH64")
elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
set (WAMR_BUILD_TARGET "RISCV64")
elseif (CMAKE_SIZEOF_VOID_P EQUAL 8)
# Build as X86_64 by default in 64-bit platform
set (WAMR_BUILD_TARGET "X86_64")
elseif (CMAKE_SIZEOF_VOID_P EQUAL 4)
# Build as X86_32 by default in 32-bit platform
set (WAMR_BUILD_TARGET "X86_32")
else ()
message(SEND_ERROR "Unsupported build target platform!")
endif ()
endif ()
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif ()
if (NOT DEFINED WAMR_BUILD_INTERP)
# Enable Interpreter by default
set (WAMR_BUILD_INTERP 1)
endif ()
if (NOT DEFINED WAMR_BUILD_AOT)
# Enable AOT by default.
set (WAMR_BUILD_AOT 1)
endif ()
if (NOT DEFINED WAMR_BUILD_JIT)
# Disable JIT by default.
set (WAMR_BUILD_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Disable Fast JIT by default
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_WASI)
# Enable libc wasi support by default
set (WAMR_BUILD_LIBC_WASI 1)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_INTERP)
# Enable fast interpreter
set (WAMR_BUILD_FAST_INTERP 1)
endif ()
if (NOT DEFINED WAMR_BUILD_MULTI_MODULE)
# Disable multiple modules by default
set (WAMR_BUILD_MULTI_MODULE 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIB_PTHREAD)
# Disable pthread library by default
set (WAMR_BUILD_LIB_PTHREAD 0)
endif ()
if (NOT DEFINED WAMR_BUILD_MINI_LOADER)
# Disable wasm mini loader by default
set (WAMR_BUILD_MINI_LOADER 0)
endif ()
if (NOT DEFINED WAMR_BUILD_SIMD)
# Enable SIMD by default
set (WAMR_BUILD_SIMD 1)
endif ()
if (NOT DEFINED WAMR_BUILD_REF_TYPES)
# Disable reference types by default
set (WAMR_BUILD_REF_TYPES 0)
endif ()
if (NOT DEFINED WAMR_BUILD_DEBUG_INTERP)
# Disable Debug feature by default
set (WAMR_BUILD_DEBUG_INTERP 0)
endif ()
if (WAMR_BUILD_DEBUG_INTERP EQUAL 1)
set (WAMR_BUILD_FAST_INTERP 0)
set (WAMR_BUILD_MINI_LOADER 0)
set (WAMR_BUILD_SIMD 0)
endif ()
if (COLLECT_CODE_COVERAGE EQUAL 1)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
endif ()
set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../../../..)
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)
add_library(vmlib ${WAMR_RUNTIME_LIB_SOURCE})
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -pie -fPIE")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wshadow")
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion -Wsign-conversion")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wno-unused")
if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
if (NOT (CMAKE_C_COMPILER MATCHES ".*clang.*" OR CMAKE_C_COMPILER_ID MATCHES ".*Clang"))
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mindirect-branch-register")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mindirect-branch-register")
# UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT WAMR_BUILD_JIT EQUAL 1)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,bounds-strict,alignment \
-fno-sanitize-recover")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,bounds-strict,alignment \
-fno-sanitize-recover")
endif()
else ()
# UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT WAMR_BUILD_JIT EQUAL 1)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,alignment \
-fno-sanitize-recover")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,alignment \
-fno-sanitize-recover")
endif()
endif ()
endif ()
# The following flags are to enhance security, but it may impact performance,
# we disable them by default.
#if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ftrapv -D_FORTIFY_SOURCE=2")
#endif ()
#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstack-protector-strong --param ssp-buffer-size=4")
#set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wl,-z,noexecstack,-z,relro,-z,now")
include (${SHARED_DIR}/utils/uncommon/shared_uncommon.cmake)
add_executable (iwasm ${WAMR_ROOT_DIR}/product-mini/platforms/${WAMR_BUILD_PLATFORM}/main.c ${UNCOMMON_SHARED_SOURCE})
install (TARGETS iwasm DESTINATION bin)
target_link_libraries (iwasm vmlib ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} ${TENSORFLOW_LIB} -lm -ldl -lpthread)
add_library (libiwasm SHARED ${WAMR_RUNTIME_LIB_SOURCE})
install (TARGETS libiwasm DESTINATION lib)
set_target_properties (libiwasm PROPERTIES OUTPUT_NAME iwasm)
target_link_libraries (libiwasm ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} -lm -ldl -lpthread)

core/iwasm/libraries/wasi-nn/test/Dockerfile

@@ -0,0 +1,32 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y \
cmake build-essential git wget python3.10 python3-pip
RUN wget -q https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-14/wasi-sdk-14.0-linux.tar.gz && \
tar xf wasi-sdk-*-linux.tar.gz -C /opt && rm -f wasi-sdk-*-linux.tar.gz && \
mv /opt/wasi-sdk-14.0 /opt/wasi-sdk
WORKDIR /home/wamr
COPY core core
COPY build-scripts build-scripts
COPY product-mini product-mini
RUN pip3 install -r core/iwasm/libraries/wasi-nn/test/requirements.txt
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test/build
RUN cmake -DWAMR_BUILD_WASI_NN=1 ..
RUN make -j $(grep -c ^processor /proc/cpuinfo)
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test
RUN ./build.sh
ENTRYPOINT [ "./build/iwasm", "--dir=.", "test_tensorflow.wasm" ]

core/iwasm/libraries/wasi-nn/test/build.sh

@@ -0,0 +1,20 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# WASM application that uses WASI-NN
/opt/wasi-sdk/bin/clang \
-Wl,--allow-undefined \
-Wl,--strip-all,--no-entry \
--sysroot=/opt/wasi-sdk/share/wasi-sysroot \
-I/home/wamr/core/iwasm/libraries/wasi-nn \
-o test_tensorflow.wasm test_tensorflow.c
# TFLite models to use in the tests
cd models
python3 average.py
python3 max.py
python3 mult_dimension.py
python3 mult_outputs.py
python3 sum.py

core/iwasm/libraries/wasi-nn/test/models/average.py

@@ -0,0 +1,16 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
from utils import save_model
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[5, 5, 1]),
tf.keras.layers.AveragePooling2D(
pool_size=(5, 5), strides=None, padding="valid", data_format=None)
])
# Export model to tflite
save_model(model, "average.tflite")

core/iwasm/libraries/wasi-nn/test/models/max.py

@@ -0,0 +1,17 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
from utils import save_model
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[5, 5, 1]),
tf.keras.layers.MaxPooling2D(
pool_size=(5, 5), strides=None, padding="valid", data_format=None)
])
# Export model to tflite
save_model(model, "max.tflite")

core/iwasm/libraries/wasi-nn/test/models/mult_dimension.py

@@ -0,0 +1,15 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
from utils import save_model
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[3, 3, 1]),
tf.keras.layers.Conv2D(1, (1, 1), kernel_initializer=tf.keras.initializers.Constant(
value=1), bias_initializer='zeros'
)
])
# Export model to tflite
save_model(model, "mult_dim.tflite")

core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py

@@ -0,0 +1,33 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
import numpy as np
from keras.layers import AveragePooling2D, Conv2D
from tensorflow.keras import Input, Model
from utils import save_model
inputs = Input(shape=(4, 4, 1))
output1 = Conv2D(1, (4, 1), kernel_initializer=tf.keras.initializers.Constant(
value=1), bias_initializer='zeros'
)(inputs)
output2 = AveragePooling2D(pool_size=(
4, 1), strides=None, padding="valid", data_format=None)(inputs)
model = Model(inputs=inputs, outputs=[output1, output2])
inp = np.arange(16).reshape((1, 4, 4, 1))
print(inp)
res = model.predict(inp)
print(res)
print(res[0].shape)
print(res[1].shape)
save_model(model, "mult_out.tflite")

core/iwasm/libraries/wasi-nn/test/models/sum.py

@@ -0,0 +1,17 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
from utils import save_model
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[5, 5, 1]),
tf.keras.layers.Conv2D(1, (5, 5), kernel_initializer=tf.keras.initializers.Constant(
value=1), bias_initializer='zeros'
)
])
# Export model to tflite
save_model(model, "sum.tflite")

core/iwasm/libraries/wasi-nn/test/models/utils.py

@@ -0,0 +1,13 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import tensorflow as tf
import pathlib
def save_model(model, filename):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("./")
tflite_model_file = tflite_models_dir/filename
tflite_model_file.write_bytes(tflite_model)

core/iwasm/libraries/wasi-nn/test/requirements.txt

@@ -0,0 +1 @@
tensorflow==2.10.0

core/iwasm/libraries/wasi-nn/test/test_tensorflow.c

@@ -0,0 +1,301 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <assert.h>
#include "wasi_nn.h"
#include <fcntl.h>
#include <errno.h>
#define MAX_MODEL_SIZE 85000000
#define MAX_OUTPUT_TENSOR_SIZE 200
#define INPUT_TENSOR_DIMS 4
#define EPSILON 1e-8
typedef struct {
float *input_tensor;
uint32_t *dim;
uint32_t elements;
} input_info;
// WASI-NN wrappers
error
wasm_load(char *model_name, graph *graph)
{
FILE *pFile = fopen(model_name, "r");
if (pFile == NULL)
return invalid_argument;
uint8_t *buffer;
size_t result;
// allocate memory to contain the whole file:
buffer = (uint8_t *)malloc(sizeof(uint8_t) * MAX_MODEL_SIZE);
if (buffer == NULL) {
fclose(pFile);
return missing_memory;
}
result = fread(buffer, 1, MAX_MODEL_SIZE, pFile);
if (result <= 0) {
fclose(pFile);
free(buffer);
return missing_memory;
}
graph_builder_array arr;
arr.size = 1;
arr.buf = (graph_builder *)malloc(sizeof(graph_builder));
if (arr.buf == NULL) {
fclose(pFile);
free(buffer);
return missing_memory;
}
arr.buf[0].size = result;
arr.buf[0].buf = buffer;
error res = load(&arr, tensorflow, cpu, graph);
fclose(pFile);
free(buffer);
free(arr.buf);
return res;
}
error
wasm_init_execution_context(graph graph, graph_execution_context *ctx)
{
return init_execution_context(graph, ctx);
}
error
wasm_input(graph_execution_context ctx, float *input_tensor, uint32_t *dim)
{
tensor_dimensions dims;
dims.size = INPUT_TENSOR_DIMS;
dims.buf = (uint32_t *)malloc(dims.size * sizeof(uint32_t));
if (dims.buf == NULL)
return missing_memory;
tensor tensor;
tensor.dimensions = &dims;
for (int i = 0; i < tensor.dimensions->size; ++i)
tensor.dimensions->buf[i] = dim[i];
tensor.type = fp32;
tensor.data = (uint8_t *)input_tensor;
error err = set_input(ctx, 0, &tensor);
free(dims.buf);
return err;
}
error
wasm_compute(graph_execution_context ctx)
{
return compute(ctx);
}
error
wasm_get_output(graph_execution_context ctx, uint32_t index, float *out_tensor,
uint32_t *out_size)
{
return get_output(ctx, index, (uint8_t *)out_tensor, out_size);
}
// Inference
float *
run_inference(float *input, uint32_t *input_size, uint32_t *output_size,
char *model_name, uint32_t num_output_tensors)
{
graph graph;
if (wasm_load(model_name, &graph) != success) {
fprintf(stderr, "Error when loading model.");
exit(1);
}
graph_execution_context ctx;
if (wasm_init_execution_context(graph, &ctx) != success) {
fprintf(stderr, "Error when initialixing execution context.");
exit(1);
}
if (wasm_input(ctx, input, input_size) != success) {
fprintf(stderr, "Error when setting input tensor.");
exit(1);
}
if (wasm_compute(ctx) != success) {
fprintf(stderr, "Error when running inference.");
exit(1);
}
float *out_tensor = (float *)malloc(sizeof(float) * MAX_OUTPUT_TENSOR_SIZE);
if (out_tensor == NULL) {
fprintf(stderr, "Error when allocating memory for output tensor.");
exit(1);
}
uint32_t offset = 0;
for (int i = 0; i < num_output_tensors; ++i) {
*output_size = MAX_OUTPUT_TENSOR_SIZE - *output_size;
if (wasm_get_output(ctx, i, &out_tensor[offset], output_size)
!= success) {
fprintf(stderr, "Error when getting input .");
exit(1);
}
offset += *output_size;
}
*output_size = offset;
return out_tensor;
}
// UTILS
input_info
create_input(int *dims)
{
input_info input = { .dim = NULL, .input_tensor = NULL, .elements = 1 };
input.dim = malloc(INPUT_TENSOR_DIMS * sizeof(uint32_t));
if (input.dim)
for (int i = 0; i < INPUT_TENSOR_DIMS; ++i) {
input.dim[i] = dims[i];
input.elements *= dims[i];
}
input.input_tensor = malloc(input.elements * sizeof(float));
for (int i = 0; i < input.elements; ++i)
input.input_tensor[i] = i;
return input;
}
// TESTS
void
test_sum()
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/sum.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 300.0) < EPSILON);
free(input.dim);
free(input.input_tensor);
free(output);
}
void
test_max()
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/max.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 24.0) < EPSILON);
printf("Result: max is %f\n", output[0]);
free(input.dim);
free(input.input_tensor);
free(output);
}
void
test_average()
{
int dims[] = { 1, 5, 5, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/average.tflite", 1);
assert(output_size == 1);
assert(fabs(output[0] - 12.0) < EPSILON);
printf("Result: average is %f\n", output[0]);
free(input.dim);
free(input.input_tensor);
free(output);
}
void
test_mult_dimensions()
{
int dims[] = { 1, 3, 3, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/mult_dim.tflite", 1);
assert(output_size == 9);
for (int i = 0; i < 9; i++)
assert(fabs(output[i] - i) < EPSILON);
free(input.dim);
free(input.input_tensor);
free(output);
}
void
test_mult_outputs()
{
int dims[] = { 1, 4, 4, 1 };
input_info input = create_input(dims);
uint32_t output_size = 0;
float *output = run_inference(input.input_tensor, input.dim, &output_size,
"models/mult_out.tflite", 2);
assert(output_size == 8);
// first tensor check
for (int i = 0; i < 4; i++)
assert(fabs(output[i] - (i * 4 + 24)) < EPSILON);
// second tensor check
for (int i = 0; i < 4; i++)
assert(fabs(output[i + 4] - (i + 6)) < EPSILON);
free(input.dim);
free(input.input_tensor);
free(output);
}
int
main()
{
printf("################### Testing sum...\n");
test_sum();
printf("################### Testing max...\n");
test_max();
printf("################### Testing average...\n");
test_average();
printf("################### Testing multiple dimensions...\n");
test_mult_dimensions();
printf("################### Testing multiple outputs...\n");
test_mult_outputs();
printf("Tests: passed!\n");
return 0;
}

core/iwasm/libraries/wasi-nn/wasi_nn.cmake

@@ -0,0 +1,10 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set (WASI_NN_DIR ${CMAKE_CURRENT_LIST_DIR})
add_definitions (-DWASM_ENABLE_WASI_NN=1)
set (LIBC_WASI_NN_SOURCE ${WASI_NN_DIR}/wasi_nn_native.c ${WASI_NN_DIR}/wasi_nn_tensorflow.cpp)
set (TENSORFLOW_LIB tensorflow-lite)

core/iwasm/libraries/wasi-nn/wasi_nn.h

@@ -0,0 +1,132 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_WASM_H
#define WASI_NN_WASM_H
#include "wasi_nn_common.h"
/**
* Following definition from:
* [Aug 10th, 2022]
* https://github.com/WebAssembly/wasi-nn/blob/e5e1a6c31f424c7cd63026cd270e9746775675a0/wasi-nn.wit.md
*/
/* The graph initialization data. */
// This consists of an array of buffers because implementing backends may encode
// their graph IR in parts (e.g., OpenVINO stores its IR and weights
// separately).
typedef struct {
uint8_t *buf;
uint32_t size;
} graph_builder;
typedef struct {
graph_builder *buf;
uint32_t size;
} graph_builder_array;
/* The dimensions of a tensor. */
// The array length matches the tensor rank and each element in the array
// describes the size of each dimension.
typedef struct {
uint32_t *buf;
uint32_t size;
} tensor_dimensions;
/* The tensor data. */
// Initially conceived as a sparse representation, each empty cell would be
// filled with zeros and the array length must match the product of all of the
// dimensions and the number of bytes in the type (e.g., a 2x2 tensor with
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
typedef uint8_t *tensor_data;
/* A tensor. */
typedef struct {
// Describe the size of the tensor (e.g., 2x2x2x2 -> [2, 2, 2, 2]). To
// represent a tensor containing a single value, use `[1]` for the tensor
// dimensions.
tensor_dimensions *dimensions;
// Describe the type of element in the tensor (e.g., f32).
tensor_type type;
// Contains the tensor data.
tensor_data data;
} tensor;
/**
* @brief Load an opaque sequence of bytes to use for inference.
*
* @param builder Model builder.
* @param encoding Model encoding.
* @param target Execution target.
* @param graph Graph.
* @return error Execution status.
*/
error
load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *graph)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
/**
* @brief Create an execution instance of a loaded graph.
*
* @param graph Graph.
* @param ctx Execution context.
* @return error Execution status.
*/
error
init_execution_context(graph graph, graph_execution_context *ctx)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
/**
* @brief Define the inputs to use for inference.
*
* @param ctx Execution context.
* @param index Input tensor index.
* @param tensor Input tensor.
* @return error Execution status.
*/
error
set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
/**
* @brief Compute the inference on the given inputs.
*
* @param ctx Execution context.
* @return error Execution status.
*/
error
compute(graph_execution_context ctx) __attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
/**
* @brief Extract the outputs after inference.
*
* @param ctx Execution context.
* @param index Output tensor index.
* @param output_tensor Buffer where output tensor with index `index` is
* copied.
* @param output_tensor_size Pointer to `output_tensor` maximum size.
* After the function call it is updated with the
* copied number of bytes.
* @return error Execution status.
*/
error
get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
__attribute__((export_module("wasi_nn")))
__attribute__((import_module("wasi_nn")));
#endif
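To make the layout comments above concrete, here is an illustrative sketch (not part of the commit) describing a 2x2 f32 tensor: the data buffer is row-major and its byte length is the product of the dimensions times the element size, 2 * 2 * 4 = 16 bytes.
```
#include <stdint.h>
#include "wasi_nn.h"

/* Illustrative sketch: build the descriptor for a 2x2 f32 tensor. */
static tensor
make_2x2_tensor(void)
{
    static float values[4] = { 1.0f, 2.0f,   /* row 0 */
                               3.0f, 4.0f }; /* row 1 */
    static uint32_t dims[2] = { 2, 2 };
    static tensor_dimensions td = { .buf = dims, .size = 2 };
    tensor t = { .dimensions = &td, .type = fp32,
                 .data = (uint8_t *)values };
    return t; /* the caller passes its address to set_input() */
}
```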

core/iwasm/libraries/wasi-nn/wasi_nn_common.h

@@ -0,0 +1,44 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_COMMON_H
#define WASI_NN_COMMON_H
#include <stdint.h>
// The type of the elements in a tensor.
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
// Describes the encoding of the graph. This allows the API to be implemented by
// various backends that encode (i.e., serialize) their graph IR with different
// formats.
typedef enum { openvino = 0, onnx, tensorflow, pytorch } graph_encoding;
// Define where the graph should be executed.
typedef enum { cpu = 0, gpu, tpu } execution_target;
// Error codes returned by functions in this API.
typedef enum {
// No error occurred.
success = 0,
// Caller module passed an invalid argument.
invalid_argument,
// Invalid encoding.
invalid_encoding,
// Caller module is missing a memory export.
missing_memory,
// Device or resource busy.
busy,
// Runtime Error.
runtime_error,
} error;
// An execution graph for performing inference (i.e., a model).
typedef uint32_t graph;
// Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t graph_execution_context;
#endif
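Since the error codes above are plain enum values, a hypothetical helper (not part of the commit) can map them to printable names for logging:
```
#include "wasi_nn_common.h"

/* Hypothetical helper: printable names for the error codes above. */
static const char *
wasi_nn_error_name(error err)
{
    switch (err) {
        case success:          return "success";
        case invalid_argument: return "invalid_argument";
        case invalid_encoding: return "invalid_encoding";
        case missing_memory:   return "missing_memory";
        case busy:             return "busy";
        case runtime_error:    return "runtime_error";
        default:               return "unknown";
    }
}
```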

core/iwasm/libraries/wasi-nn/wasi_nn_native.c

@@ -0,0 +1,264 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include "wasi_nn_common.h"
#include "wasm_export.h"
#include "bh_platform.h"
#include "wasi_nn.h"
#include "wasi_nn_tensorflow.hpp"
#include "logger.h"
/* Definition of 'wasi_nn.h' structs in WASM app format (using offset) */
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} graph_builder_array_wasm;
typedef struct {
uint32_t dimensions_offset;
tensor_type type;
uint32_t data_offset;
} tensor_wasm;
typedef struct {
uint32_t buf_offset;
uint32_t size;
} tensor_dimensions_wasm;
/* Global variables */
static uint8_t _is_initialized;
static graph_encoding _encoding;
/* Utils */
static error
check_initialized()
{
if (!_is_initialized) {
NN_ERR_PRINTF("Model not initialized.");
return invalid_argument;
}
if (_encoding != tensorflow) {
NN_ERR_PRINTF("Model encoding is not tensorflow.");
return invalid_argument;
}
return success;
}
/* WASI-NN implementation */
error
wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *graph)
{
NN_DBG_PRINTF("Running wasi_nn_load [encoding=%d, target=%d]...", encoding,
target);
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
if (!wasm_runtime_validate_native_addr(instance, builder,
sizeof(graph_builder_array_wasm)))
return invalid_argument;
if (!wasm_runtime_validate_app_addr(instance, builder->buf_offset,
builder->size * sizeof(uint32_t)))
return invalid_argument;
NN_DBG_PRINTF("Graph builder array contains %d elements", builder->size);
graph_builder_wasm *gb_wasm =
(graph_builder_wasm *)wasm_runtime_addr_app_to_native(
instance, builder->buf_offset);
graph_builder *gb_native = (graph_builder *)wasm_runtime_malloc(
builder->size * sizeof(graph_builder));
if (gb_native == NULL)
return missing_memory;
for (int i = 0; i < builder->size; ++i) {
if (!wasm_runtime_validate_app_addr(instance, gb_wasm[i].buf_offset,
gb_wasm[i].size
* sizeof(uint8_t))) {
wasm_runtime_free(gb_native);
return invalid_argument;
}
gb_native[i].buf = (uint8_t *)wasm_runtime_addr_app_to_native(
instance, gb_wasm[i].buf_offset);
gb_native[i].size = gb_wasm[i].size;
NN_DBG_PRINTF("Graph builder %d contains %d elements", i,
gb_wasm[i].size);
}
graph_builder_array gba_native = { .buf = gb_native,
.size = builder->size };
if (!wasm_runtime_validate_native_addr(instance, graph, sizeof(graph))) {
wasm_runtime_free(gb_native);
return invalid_argument;
}
switch (encoding) {
case tensorflow:
break;
default:
NN_ERR_PRINTF("Only tensorflow is supported.");
wasm_runtime_free(gb_native);
return invalid_argument;
}
_encoding = encoding;
_is_initialized = 1;
error res = tensorflow_load(gba_native, _encoding, target, graph);
NN_DBG_PRINTF("wasi_nn_load finished with status %d [graph=%d]", res,
*graph);
wasm_runtime_free(gb_native);
return res;
}
error
wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph graph,
graph_execution_context *ctx)
{
NN_DBG_PRINTF("Running wasi_nn_init_execution_context [graph=%d]...",
graph);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_init_execution_context(graph);
*ctx = graph;
NN_DBG_PRINTF(
"wasi_nn_init_execution_context finished with status %d [ctx=%d]", res,
*ctx);
return res;
}
error
wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_wasm *input_tensor)
{
NN_DBG_PRINTF("Running wasi_nn_set_input [ctx=%d, index=%d]...", ctx,
index);
error res;
if (success != (res = check_initialized()))
return res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
bh_assert(instance);
if (!wasm_runtime_validate_native_addr(instance, input_tensor,
sizeof(tensor_wasm)))
return invalid_argument;
if (!wasm_runtime_validate_app_addr(
instance, input_tensor->dimensions_offset, sizeof(uint32_t)))
return invalid_argument;
tensor_dimensions_wasm *dimensions_w =
(tensor_dimensions_wasm *)wasm_runtime_addr_app_to_native(
instance, input_tensor->dimensions_offset);
if (!wasm_runtime_validate_app_addr(instance, dimensions_w->buf_offset,
dimensions_w->size * sizeof(uint32_t)))
return invalid_argument;
tensor_dimensions dimensions = {
.buf = (uint32_t *)wasm_runtime_addr_app_to_native(
instance, dimensions_w->buf_offset),
.size = dimensions_w->size
};
NN_DBG_PRINTF("Number of dimensions: %d", dimensions.size);
int total_elements = 1;
for (int i = 0; i < dimensions.size; ++i) {
NN_DBG_PRINTF("Dimension %d: %d", i, dimensions.buf[i]);
total_elements *= dimensions.buf[i];
}
NN_DBG_PRINTF("Tensor type: %d", input_tensor->type);
if (!wasm_runtime_validate_app_addr(instance, input_tensor->data_offset,
total_elements))
return invalid_argument;
tensor tensor = { .type = input_tensor->type,
.dimensions = &dimensions,
.data = (uint8_t *)wasm_runtime_addr_app_to_native(
instance, input_tensor->data_offset) };
res = tensorflow_set_input(ctx, index, &tensor);
NN_DBG_PRINTF("wasi_nn_set_input finished with status %d", res);
return res;
}
error
wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
{
NN_DBG_PRINTF("Running wasi_nn_compute [ctx=%d]...", ctx);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_compute(ctx);
NN_DBG_PRINTF("wasi_nn_compute finished with status %d", res);
return res;
}
error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
{
NN_DBG_PRINTF("Running wasi_nn_get_output [ctx=%d, index=%d]...", ctx,
index);
error res;
if (success != (res = check_initialized()))
return res;
res = tensorflow_get_output(ctx, index, output_tensor, output_tensor_size);
NN_DBG_PRINTF("wasi_nn_get_output finished with status %d [data_size=%d]",
res, *output_tensor_size);
return res;
}
/* Register WASI-NN in WAMR */
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
{ #func_name, wasi_nn_##func_name, signature, NULL }
/* clang-format on */
static NativeSymbol native_symbols_wasi_nn[] = {
REG_NATIVE_FUNC(load, "(*ii*)i"),
REG_NATIVE_FUNC(init_execution_context, "(i*)i"),
REG_NATIVE_FUNC(set_input, "(ii*)i"),
REG_NATIVE_FUNC(compute, "(i)i"),
REG_NATIVE_FUNC(get_output, "(ii**)i"),
};
uint32_t
get_wasi_nn_export_apis(NativeSymbol **p_libc_wasi_apis)
{
*p_libc_wasi_apis = native_symbols_wasi_nn;
return sizeof(native_symbols_wasi_nn) / sizeof(NativeSymbol);
}
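For readers unfamiliar with the signature strings passed to REG_NATIVE_FUNC: assuming WAMR's usual native-symbol encoding (parentheses enclose the parameters, the trailing character is the result, 'i' is a 32-bit value, and '*' is a pointer validated against the module instance), a hedged reading of the table above is:
```
/* Assumed decoding of the signature strings above (annotation, not commit):
 *   load                   "(*ii*)i"  (builder*, encoding, target, graph*) -> error
 *   init_execution_context "(i*)i"    (graph, ctx*)                        -> error
 *   set_input              "(ii*)i"   (ctx, index, tensor*)                -> error
 *   compute                "(i)i"     (ctx)                                -> error
 *   get_output             "(ii**)i"  (ctx, index, out_tensor*, out_size*) -> error
 */
```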

core/iwasm/libraries/wasi-nn/wasi_nn_tensorflow.cpp

@@ -0,0 +1,188 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_tensorflow.hpp"
#include "wasi_nn_common.h"
#include "bh_common.h"
#include "bh_platform.h"
#include "platform_common.h"
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/error_reporter.h>
/* Global variables */
static std::unique_ptr<tflite::Interpreter> interpreter;
static std::unique_ptr<tflite::FlatBufferModel> model;
static char *model_pointer = NULL;
/* WASI-NN (tensorflow) implementation */
error
tensorflow_load(graph_builder_array builder, graph_encoding encoding,
execution_target target, graph *graph)
{
if (model_pointer != NULL) {
wasm_runtime_free(model_pointer);
model_pointer = NULL;
}
if (builder.size != 1) {
NN_ERR_PRINTF("Unexpected builder format.");
return invalid_argument;
}
if (encoding != tensorflow) {
NN_ERR_PRINTF("Encoding is not tensorflow.");
return invalid_argument;
}
if (target != cpu) {
NN_ERR_PRINTF("Only CPU target is supported.");
return invalid_argument;
}
uint32_t size = builder.buf[0].size;
model_pointer = (char *)wasm_runtime_malloc(size);
if (model_pointer == NULL) {
NN_ERR_PRINTF("Error when allocating memory for model.");
return missing_memory;
}
bh_memcpy_s(model_pointer, size, builder.buf[0].buf, size);
model = tflite::FlatBufferModel::BuildFromBuffer(model_pointer, size, NULL);
if (model == NULL) {
NN_ERR_PRINTF("Loading model error.");
wasm_runtime_free(model_pointer);
model_pointer = NULL;
return missing_memory;
}
// Build the interpreter with the InterpreterBuilder.
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder tflite_builder(*model, resolver);
tflite_builder(&interpreter);
if (interpreter == NULL) {
NN_ERR_PRINTF("Error when generating the interpreter.");
wasm_runtime_free(model_pointer);
model_pointer = NULL;
return missing_memory;
}
return success;
}
error
tensorflow_init_execution_context(graph graph)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
return runtime_error;
}
interpreter->AllocateTensors();
return success;
}
error
tensorflow_set_input(graph_execution_context ctx, uint32_t index,
tensor *input_tensor)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
return runtime_error;
}
uint32_t num_tensors = interpreter->inputs().size();
NN_DBG_PRINTF("Number of tensors (%d)", num_tensors);
if (index + 1 > num_tensors) {
return runtime_error;
}
auto tensor = interpreter->input_tensor(index);
if (tensor == NULL) {
NN_ERR_PRINTF("Missing memory");
return missing_memory;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
uint32_t input_tensor_size = 1;
for (int i = 0; i < input_tensor->dimensions->size; i++)
input_tensor_size *= (uint32_t)input_tensor->dimensions->buf[i];
if (model_tensor_size != input_tensor_size) {
NN_ERR_PRINTF("Input tensor shape from the model is different than the "
"one provided");
return invalid_argument;
}
auto *input = interpreter->typed_input_tensor<float>(index);
if (input == NULL)
return missing_memory;
bh_memcpy_s(input, model_tensor_size * sizeof(float), input_tensor->data,
model_tensor_size * sizeof(float));
return success;
}
error
tensorflow_compute(graph_execution_context ctx)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
return runtime_error;
}
interpreter->Invoke();
return success;
}
error
tensorflow_get_output(graph_execution_context context, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
{
if (interpreter == NULL) {
NN_ERR_PRINTF("Non-initialized interpreter.");
return runtime_error;
}
uint32_t num_output_tensors = interpreter->outputs().size();
NN_DBG_PRINTF("Number of tensors (%d)", num_output_tensors);
if (index + 1 > num_output_tensors) {
return runtime_error;
}
auto tensor = interpreter->output_tensor(index);
if (tensor == NULL) {
NN_ERR_PRINTF("Missing memory");
return missing_memory;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return missing_memory;
}
float *tensor_f = interpreter->typed_output_tensor<float>(index);
for (int i = 0; i < model_tensor_size; ++i)
NN_DBG_PRINTF("output: %f", tensor_f[i]);
*output_tensor_size = model_tensor_size;
bh_memcpy_s(output_tensor, model_tensor_size * sizeof(float), tensor_f,
model_tensor_size * sizeof(float));
return success;
}

core/iwasm/libraries/wasi-nn/wasi_nn_tensorflow.hpp

@@ -0,0 +1,40 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TENSORFLOW_HPP
#define WASI_NN_TENSORFLOW_HPP
#include <stdio.h>
#include "wasi_nn.h"
#include "logger.h"
#ifdef __cplusplus
extern "C" {
#endif
error
tensorflow_load(graph_builder_array builder, graph_encoding encoding,
execution_target target, graph *graph);
error
tensorflow_init_execution_context(graph graph);
error
tensorflow_set_input(graph_execution_context ctx, uint32_t index,
tensor *input_tensor);
error
tensorflow_compute(graph_execution_context ctx);
error
tensorflow_get_output(graph_execution_context context, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size);
#ifdef __cplusplus
}
#endif
#endif