
Merge pull request #3130 from alibaba/feature/sync
MNN:Sync: Sync Internal 3.0.2
jxt1234 authored Dec 20, 2024
2 parents 85291d0 + c7e8d70 commit 3ee08ac
Showing 158 changed files with 6,702 additions and 5,785 deletions.
7 changes: 0 additions & 7 deletions .gitignore
@@ -361,10 +361,3 @@ pymnn_build/

# mnncompress generated
MNN_compression_pb2.py

# model path
model/

# datasets
datasets/*
!datasets/*.sh
18 changes: 18 additions & 0 deletions 3rd_party/OpenCLHeaders/CL/cl2.hpp
@@ -3810,6 +3810,24 @@ class Buffer : public Memory
}
}

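/*! \brief Construct a Buffer by importing externally allocated memory.
 *
 *  Wraps clImportMemoryARM from the cl_arm_import_memory extension;
 *  see the usage sketch after this diff.
 */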
Buffer(
const Context& context,
cl_mem_flags flags,
const cl_import_properties_arm *properties,
void *memory,
size_type size,
cl_int* err = NULL)
{
cl_int error;
object_ = ::clImportMemoryARM(context(), flags, properties, memory, size, &error);

detail::errHandler(error, __CREATE_BUFFER_ERR);
if (err != NULL) {
*err = error;
}
}


/*!
* \brief Construct a Buffer from a host container via iterators using a specified context.
* IteratorType must be random access.
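For context, a minimal sketch of how this imported-memory constructor might be used on a device exposing `cl_arm_import_memory`. The context setup, the `CL_IMPORT_TYPE_HOST_ARM` property, and the alignment value are illustrative assumptions, not part of this commit:

```cpp
#include <CL/cl_ext.h>
#include <CL/cl2.hpp>
#include <cstdlib>

int main() {
    cl::Context context(CL_DEVICE_TYPE_GPU);

    // Page-aligned host allocation; import extensions typically require this.
    const size_t bytes = 1 << 20;
    void* host = aligned_alloc(4096, bytes);

    // Property list: import plain host memory (assumes driver support).
    const cl_import_properties_arm props[] = {
        CL_IMPORT_TYPE_ARM, CL_IMPORT_TYPE_HOST_ARM, 0
    };

    cl_int err = CL_SUCCESS;
    {
        // The new constructor wraps clImportMemoryARM, so the buffer
        // aliases `host` with no copy on import-capable drivers.
        cl::Buffer imported(context, CL_MEM_READ_WRITE, props, host, bytes, &err);
        // ... enqueue kernels that read/write `imported` here ...
    }   // buffer released before the host allocation is freed
    free(host);
    return err == CL_SUCCESS ? 0 : 1;
}
```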
22 changes: 22 additions & 0 deletions 3rd_party/OpenCLHeaders/CL/cl_ext.h
@@ -430,6 +430,23 @@ typedef struct _cl_mem_android_native_buffer_host_ptr
} cl_mem_android_native_buffer_host_ptr;


/*********************************
* cl_qcom_ahardwarebuffer_host_ptr extension
*********************************/

#define CL_MEM_ANDROID_AHARDWAREBUFFER_HOST_PTR_QCOM 0x4119

typedef struct _cl_mem_ahardwarebuffer_host_ptr
{
/* Type of external memory allocation. */
/* Must be CL_MEM_ANDROID_AHARDWAREBUFFER_HOST_PTR_QCOM for Android Hardware buffers. */
cl_mem_ext_host_ptr ext_host_ptr;

/* Virtual pointer to the android hardware buffer */
void* ahb_ptr;

} cl_mem_ahardwarebuffer_host_ptr;

/******************************************
* cl_img_yuv_image extension *
******************************************/
@@ -583,6 +600,11 @@ typedef intptr_t cl_import_properties_arm;

/* Protected DMA BUF memory type value for CL_IMPORT_TYPE_ARM property */
#define CL_IMPORT_TYPE_PROTECTED_ARM 0x40B5
#define CL_IMPORT_TYPE_ANDROID_HARDWARE_BUFFER_ARM 0x41E2
#define CL_IMPORT_DMA_BUF_DATA_CONSISTENCY_WITH_HOST_ARM 0x41E3
#define CL_IMPORT_MEMORY_WHOLE_ALLOCATION_ARM SIZE_MAX
#define CL_IMPORT_ANDROID_HARDWARE_BUFFER_PLANE_INDEX_ARM 0x41EF
#define CL_IMPORT_ANDROID_HARDWARE_BUFFER_LAYER_INDEX_ARM 0x41F0

/* This extension adds a new function that allows for direct memory import into
* OpenCL via the clImportMemoryARM function.
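To illustrate the new property values, here is a hedged sketch of importing an Android `AHardwareBuffer` through `clImportMemoryARM` with `CL_IMPORT_TYPE_ANDROID_HARDWARE_BUFFER_ARM`. The helper name and error handling are hypothetical, and driver support must be checked at runtime:

```cpp
#include <android/hardware_buffer.h>
#include <CL/cl_ext.h>

// Hypothetical helper: wrap an AHardwareBuffer in a cl_mem without copying.
cl_mem importAHardwareBuffer(cl_context context, AHardwareBuffer* ahb) {
    const cl_import_properties_arm props[] = {
        CL_IMPORT_TYPE_ARM, CL_IMPORT_TYPE_ANDROID_HARDWARE_BUFFER_ARM,
        0
    };
    cl_int err = CL_SUCCESS;
    // CL_IMPORT_MEMORY_WHOLE_ALLOCATION_ARM (SIZE_MAX) lets the driver take
    // the size from the AHardwareBuffer allocation itself.
    cl_mem mem = clImportMemoryARM(context, CL_MEM_READ_WRITE, props, ahb,
                                   CL_IMPORT_MEMORY_WHOLE_ALLOCATION_ARM, &err);
    return (err == CL_SUCCESS) ? mem : nullptr;
}
```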
32 changes: 27 additions & 5 deletions CMakeLists.txt
@@ -20,9 +20,7 @@ endif()
project(MNN VERSION ${MNN_VERSION} LANGUAGES C CXX ASM)
# compiler options
set(CMAKE_C_STANDARD 99)
IF (NOT (CMAKE_CXX_STANDARD EQUAL 17))
set(CMAKE_CXX_STANDARD 11)
ENDIF()
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_MODULE_PATH
${CMAKE_MODULE_PATH}
"${CMAKE_CURRENT_LIST_DIR}/cmake"
@@ -49,7 +47,7 @@ option(MNN_BUILD_TOOLS "Build tools/cpp or not" ON)
option(MNN_BUILD_QUANTOOLS "Build Quantized Tools or not" OFF)
option(MNN_EVALUATION "Build Evaluation Tools or not" OFF)
option(MNN_BUILD_CONVERTER "Build Converter" OFF)
option(MNN_SUPPORT_DEPRECATED_OP "Enable MNN's tflite quantized op" ON)
option(MNN_SUPPORT_DEPRECATED_OP "Enable MNN's tflite quantized op" OFF)
option(MNN_DEBUG_MEMORY "MNN Debug Memory Access" OFF)
option(MNN_DEBUG_TENSOR_SIZE "Enable Tensor Size" OFF)
option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF)
@@ -74,6 +72,7 @@ option(MNN_JNI "Build MNN Jni for java to use" OFF)
option(MNN_SUPPORT_BF16 "Enable MNN's bf16 op" OFF)
option(MNN_LOW_MEMORY "Build MNN support low memory for weight quant model." OFF)
option(MNN_CPU_WEIGHT_DEQUANT_GEMM "Build MNN CPU weight dequant related gemm kernels." OFF)
option(MNN_BUILD_AUDIO "Build audio api in MNN." OFF)

IF (OHOS AND MNN_INTERNAL)
include($ENV{NODE_PATH}/@ali/tcpkg/tcpkg.cmake)
@@ -192,6 +191,9 @@ endif()
if(MNN_SUPPORT_TRANSFORMER_FUSE)
add_definitions(-DMNN_SUPPORT_TRANSFORMER_FUSE)
endif()
if(MNN_BUILD_AUDIO)
add_definitions(-DMNN_BUILD_AUDIO)
endif()
# debug options
if(MNN_DEBUG_MEMORY)
add_definitions(-DMNN_DEBUG_MEMORY)
@@ -287,7 +289,7 @@ if(CMAKE_SYSTEM_NAME MATCHES "^Android")
endif()
option(MNN_USE_CPP11 "Enable MNN use c++11" ON)
if (NOT MSVC)
if((MNN_CUDA AND MNN_SUPPORT_TRANSFORMER_FUSE) OR (CMAKE_CXX_STANDARD EQUAL 17))
if(MNN_CUDA AND MNN_SUPPORT_TRANSFORMER_FUSE)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
@@ -463,6 +465,10 @@ IF(MNN_BUILD_OPENCV)
list(APPEND MNN_EXTRA_HEADERS ${MNN_CV_HDRS})
list(APPEND MNN_EXTRA_HEADERS ${MNN_CV_IMGHDRS})
ENDIF()
IF(MNN_BUILD_AUDIO)
file(GLOB MNN_AUDIO_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/tools/audio/include/audio/*.hpp)
list(APPEND MNN_EXTRA_HEADERS ${MNN_AUDIO_HDRS})
ENDIF()
IF(MNN_BUILD_LLM)
file(GLOB MNN_LLM_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/transformers/llm/engine/include/llm/*)
list(APPEND MNN_EXTRA_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/transformers/llm/engine/include/llm/llm.hpp)
@@ -775,6 +781,14 @@ IF(MNN_BUILD_OPENCV AND NOT MNN_SEP_BUILD)
ENDIF()
target_sources(MNN PRIVATE $<TARGET_OBJECTS:MNNOpenCV>)
ENDIF()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/audio)
IF(MNN_BUILD_AUDIO AND NOT MNN_SEP_BUILD)
IF(MSVC)
target_compile_definitions(MNNAudio PRIVATE "-DBUILDING_MNN_DLL" INTERFACE "-DUSING_MNN_DLL")
ENDIF()
message(STATUS "### build MNNAudio into MNN")
target_sources(MNN PRIVATE $<TARGET_OBJECTS:MNNAudio>)
ENDIF()


if(CMAKE_SYSTEM_NAME MATCHES "^Linux")
@@ -884,6 +898,14 @@ ELSE()
SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/cv/imgproc )
ENDFOREACH()
ENDIF()
IF(MNN_BUILD_AUDIO)
if (NOT MNN_AAPL_FMWK)
INSTALL(FILES ${MNN_AUDIO_HDRS} DESTINATION include/MNN/audio)
endif()
FOREACH(HDR ${MNN_AUDIO_HDRS})
SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/audio/ )
ENDFOREACH()
ENDIF()
IF(MNN_BUILD_LLM)
if (NOT MNN_AAPL_FMWK)
INSTALL(FILES ${MNN_LLM_HDRS} DESTINATION include/MNN/llm)
4 changes: 3 additions & 1 deletion docs/compile/cmake.md
@@ -16,7 +16,7 @@ MNN is built with CMake; its macro definitions are listed below:
| MNN_BUILD_QUANTOOLS | Whether to build MNN's quantization tools, defaults to `OFF` |
| MNN_EVALUATION | Whether to build MNN's evaluation tools, defaults to `OFF` |
| MNN_BUILD_CONVERTER | Whether to build MNN's model converter, defaults to `OFF` |
| MNN_SUPPORT_DEPRECATED_OP | Whether to support TFLite quantized ops, defaults to `ON` |
| MNN_SUPPORT_DEPRECATED_OP | Whether to support deprecated ops such as TFLite quantized ops, for compatibility with legacy models (pre-1.1.0), defaults to `OFF` |
| MNN_DEBUG_MEMORY | Whether to enable MNN memory debugging, defaults to `OFF` |
| MNN_DEBUG_TENSOR_SIZE | Whether to enable MNN tensor size debugging, defaults to `OFF` |
| MNN_GPU_TRACE | Whether to enable MNN GPU debugging, defaults to `OFF` |
@@ -32,6 +32,7 @@ MNN is built with CMake; its macro definitions are listed below:
| MNN_ENABLE_COVERAGE | Whether to enable code coverage for MNN, defaults to `OFF` |
| MNN_BUILD_PROTOBUFFER | Whether to use the `protobuffer` bundled with MNN, defaults to `ON` |
| MNN_BUILD_OPENCV | Whether to build MNN's OpenCV functionality, defaults to `OFF` |
| MNN_BUILD_AUDIO | Whether to build MNN's audio functionality, defaults to `OFF` |
| MNN_INTERNAL | Whether to build some MNN-internal features, such as logging; defaults to `OFF` |
| MNN_JNI | Whether to build MNN's JNI support, defaults to `OFF` |
| MNN_METAL | Whether to build the `Metal` backend, defaults to `OFF` |
@@ -79,6 +80,7 @@ MNN is built with CMake; its macro definitions are listed below:
| MNN_CVCORE | Whether to enable the `core` module when building MNN's OpenCV functionality, defaults to `ON` |
| MNN_OPENCV_TEST | Whether to build unit tests for MNN's OpenCV functionality, defaults to `OFF` |
| MNN_OPENCV_BENCH | Whether to build performance benchmarks for MNN's OpenCV functionality, defaults to `OFF` |
| MNN_AUDIO_TEST | Whether to build unit tests for MNN's audio functionality, defaults to `OFF` |
| MNN_VULKAN_IMAGE | Whether to use the Image memory mode when building MNN's Vulkan backend, to support FP16 and GPU acceleration on some mobile devices, defaults to `ON` |
| MNN_LOW_MEMORY | Whether to support the low-memory mode; when enabled, a weight-quantized model run with `low_memory` set dequantizes at compute time, defaults to `OFF` |
| MNN_CPU_WEIGHT_DEQUANT_GEMM | Whether to build the CPU weight-dequantization GEMM kernels; if this macro is enabled and MNN::BackendConfig::MemoryMode=Memory_Normal is set for CPU inference, weight-dequantization ops run weight-quantized models (see the sketch below the table), defaults to `OFF` |
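As a companion to the `MNN_CPU_WEIGHT_DEQUANT_GEMM` row above, a minimal sketch of selecting `Memory_Normal` for a CPU session; the model path is hypothetical:

```cpp
#include <MNN/Interpreter.hpp>
#include <memory>

int main() {
    // Hypothetical weight-quantized model; requires a build with
    // -DMNN_CPU_WEIGHT_DEQUANT_GEMM=ON for the dequant GEMM kernels.
    std::shared_ptr<MNN::Interpreter> net(
        MNN::Interpreter::createFromFile("weight_quant_model.mnn"));
    MNN::BackendConfig backendConfig;
    backendConfig.memory = MNN::BackendConfig::Memory_Normal;
    MNN::ScheduleConfig config;
    config.type = MNN_FORWARD_CPU;
    config.backendConfig = &backendConfig;
    auto session = net->createSession(config);
    net->runSession(session);
    return 0;
}
```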
13 changes: 13 additions & 0 deletions docs/compile/other.md
@@ -133,6 +133,19 @@
- `libMNNOpenCV.so` MNN OpenCV library
- `opencv_test` MNN OpenCV unit tests
- `opencv_bench` MNN OpenCV benchmarks
## MNN Audio Library
- Related build options
  - `MNN_BUILD_AUDIO` whether to build the audio API
  - `MNN_AUDIO_TEST` whether to build the audio unit tests
- Build commands
```bash
mkdir build && cd build
cmake .. -DMNN_BUILD_AUDIO=ON -DMNN_AUDIO_TEST=ON
make -j4
```
- Build outputs
  - `libMNNAudio.so` MNN audio library
  - `audio_test` MNN audio unit tests

## Example Projects
- Related build options
51 changes: 16 additions & 35 deletions docs/transformers/llm.md
@@ -49,7 +49,7 @@ python llmexport.py \

### Features
- Convert directly to an MNN model with `--export mnn`. Note that you must either install pymnn or pass the path of the MNNConvert tool via the `--mnnconvert` option; at least one of the two conditions must hold. If pymnn is not installed and no `--mnnconvert` path is given, the llmexport.py script looks for MNNConvert under the directory "../../../build/", and the MNNConvert file must exist there. This route currently supports exporting 4-bit and 8-bit models.
- If converting directly to an MNN model hits problems, or you need other quantization bit widths (such as 5-bit/6-bit), you can first export the model to ONNX with `--export onnx`, then convert the ONNX model to MNN with the ./MNNConvert tool:

```
./MNNConvert --modelFile ../transformers/llm/export/model/onnx/llm.onnx --MNNModel llm.mnn --keepInputFormat --weightQuantBits=4 --weightQuantBlock=128 -f ONNX --transformerFuse=1 --allowCustomOp --saveExternalData
```

@@ -98,13 +98,17 @@ options:
[Build from source](../compile/other.html#id4)
Add the required build macros to the existing build process:
```
-DMNN_LOW_MEMORY=true -DMNN_CPU_WEIGHT_DEQUANT_GEMM=true -DMNN_BUILD_LLM=true -DMNN_SUPPORT_TRANSFORMER_FUSE=true
```

- To enable vision support, add the related build macros
```
-DLLM_SUPPORT_VISION=true -DMNN_BUILD_OPENCV=true -DMNN_IMGCODECS=true
```
- To enable audio support, add the related build macros
```
-DLLM_SUPPORT_AUDIO=true
```

#### mac / linux / windows

@@ -137,7 +141,7 @@ sh package_scripts/ios/buildiOS.sh "-DMNN_ARM82=true -DMNN_LOW_MEMORY=true -DMNN
```

#### Web
For environment setup, see https://mnn-docs.readthedocs.io/en/latest/compile/engine.html#web

- Build the libraries, producing `libMNN.a`, `libMNN_Express.a`, `libllm.a`

Expand Down Expand Up @@ -189,7 +193,7 @@ node llm_demo.js ~/qwen2.0_1.5b/config.json ~/qwen2.0_1.5b/prompt.txt
- visual_model: when using a VL model, the actual path of visual_model is `base_dir + visual_model`, defaults to `base_dir + 'visual.mnn'`
- Inference configuration
    - max_new_tokens: maximum number of tokens to generate, defaults to `512`
    - reuse_kv: whether multi-turn dialogue reuses the `kv cache` of previous turns, defaults to `false`; currently only the CPU backend supports setting this to `true`.
    - reuse_kv: whether multi-turn dialogue reuses the `kv cache` of previous turns, defaults to `false`
    - quant_qkv: whether `query, key, value` in the CPU attention op are quantized; one of `0, 1, 2, 3, 4`, defaults to `0`, meaning:
        - 0: neither key nor value is quantized
        - 1: key is stored with asymmetric 8-bit quantization
@@ -205,19 +209,6 @@
    - thread_num: number of hardware threads for CPU inference, defaults to `4`; use `68` for OpenCL inference
    - precision: precision policy for inference, defaults to `"low"`, preferring `fp16`
    - memory: memory policy for inference, defaults to `"low"`, enabling runtime quantization
- Sampler configuration
    - sampler_type: which sampler to use; currently supports the 8 basic samplers `greedy`, `temperature`, `topK`, `topP`, `minP`, `tfs`, `typical`, `penalty`, plus `mixed` (a combined sampler). With `mixed`, the samplers in mixed_samplers run in order. Defaults to `mixed`
    - mixed_samplers: effective when `sampler_type` is `mixed`, defaults to `["topK", "tfs", "typical", "topP", "min_p", "temperature"]`
    - temperature: the temperature value used by `temperature`, `topP`, `minP`, `tfsZ`, `typical`, defaults to 1.0
    - topK: the number of top-K candidates in `topK`, defaults to 40
    - topP: the top-P value in `topP`, defaults to 0.9
    - minP: the min-P value in `minP`, defaults to 0.1
    - tfsZ: the Z value in `tfs`, defaults to 1.0, i.e. TFS disabled
    - typical: the p value in `typical`, defaults to 1.0, i.e. typical sampling disabled
    - penalty: the penalty applied to logits in `penalty`, defaults to 0.0, i.e. no penalty
    - n_gram: the maximum n-gram size tracked by `penalty`, defaults to 8
    - ngram_factor: the extra penalty for repeated n-grams in `penalty`, defaults to 1.0, i.e. no extra penalty
    - penalty_sampler: the sampling strategy for the final step of `penalty`, either "greedy" or "temperature", defaults to greedy.

##### Configuration file examples
- `config.json`
@@ -229,15 +220,7 @@ node llm_demo.js ~/qwen2.0_1.5b/config.json ~/qwen2.0_1.5b/prompt.txt
"backend_type": "cpu",
"thread_num": 4,
"precision": "low",
"memory": "low",
"sampler_type": "mixed",
"mixed_samplers": ["topK", "tfs", "typical", "topP", "min_p", "temperature"],
"temperature": 1.0,
"topK": 40,
"topP": 0.9,
"tfsZ": 1.0,
"minP": 0.1,
"reuse_kv": true
"memory": "low"
}
```
- `llm_config.json`
@@ -261,8 +244,7 @@ node llm_demo.js ~/qwen2.0_1.5b/config.json ~/qwen2.0_1.5b/prompt.txt

#### Inference Usage
Usage of `llm_demo`:
Direct inference on PC
```bash
# Using config.json
## Interactive chat
./llm_demo model_dir/config.json
@@ -276,16 +258,15 @@ Direct inference on PC
./llm_demo model_dir/llm.mnn prompt.txt
```

Inference via adb on an Android phone:
```bash
# Push the executables and shared libraries to the phone with adb push
adb shell mkdir /data/local/tmp/llm
adb push llm_demo ppl_demo libllm.so libMNN_CL.so libMNN_Express.so libMNN.so tools/cv/libMNNOpenCV.so /data/local/tmp/llm
```

- For vision LLMs, embed image input in the prompt
```
<img>https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg</img>Describe what is in the image
# Specify the image size
<img><hw>280, 420</hw>https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg</img>Describe what is in the image
```
- For audio LLMs, embed audio input in the prompt
```
<audio>https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav</audio>Describe the content of the audio
```

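Besides the `llm_demo` command line, the engine can also be driven from C++. A rough sketch, assuming the `llm.hpp` interface of this release (`createLLM`/`load`/`response`); the config path is illustrative:

```cpp
#include <llm/llm.hpp>
#include <memory>

using namespace MNN::Transformer;

int main() {
    // Load the config.json shown above; base_dir resolves the model files.
    std::unique_ptr<Llm> llm(Llm::createLLM("model_dir/config.json"));
    llm->load();                           // builds the runtime and loads weights
    llm->response("Hello, who are you?");  // streams the reply to stdout
    return 0;
}
```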
#### Loading GPTQ weights
3 changes: 2 additions & 1 deletion docs/transformers/models.md
@@ -47,4 +47,5 @@
| [reader-lm-0.5b](https://huggingface.co/jinaai/reader-lm-0.5b) | [Q4_1](https://modelscope.cn/models/MNN/reader-lm-0.5b-MNN) | [Q4_1](https://huggingface.co/taobao-mnn/reader-lm-0.5b-MNN) |
| [reader-lm-1.5b](https://huggingface.co/jinaai/reader-lm-1.5b) | [Q4_1](https://modelscope.cn/models/MNN/reader-lm-1.5b-MNN) | [Q4_1](https://huggingface.co/taobao-mnn/reader-lm-1.5b-MNN) |
| [TinyLlama-1.1B-Chat-v1.0](https://modelscope.cn/models/AI-ModelScope/TinyLlama-1.1B-Chat-v1.0/summary) | [Q4_1](https://modelscope.cn/models/MNN/TinyLlama-1.1B-Chat-MNN) | [Q4_1](https://huggingface.co/taobao-mnn/TinyLlama-1.1B-Chat-MNN) |
| [Yi-6B-Chat](https://modelscope.cn/models/01ai/Yi-6B-Chat/summary) | [Q4_1](https://modelscope.cn/models/MNN/Yi-6B-Chat-MNN) | [Q4_1](https://huggingface.co/taobao-mnn/Yi-6B-Chat-MNN) |
| [QwQ-32B-Preview](https://modelscope.cn/models/Qwen/QwQ-32B-Preview/summary) | [Q4_1](https://modelscope.cn/models/MNN/QwQ-32B-Preview-MNN) | [Q4_1](https://huggingface.co/taobao-mnn/QwQ-32B-Preview-MNN) |
23 changes: 19 additions & 4 deletions express/MathOp.cpp
@@ -1208,7 +1208,7 @@ VARP _LinSpace(VARP start, VARP stop, VARP num) {
return (Variable::create(Expr::create(std::move(op), {start, stop, num})));
}

VARP _EltwiseProdInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale)
@@ -1219,7 +1219,7 @@ VARP _EltwiseProdInt8(VARP x, VARP y,
output_weight, output_bias, output_scale, output_tensorScale);
}

VARP _EltwiseSumInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale)
@@ -1230,7 +1230,7 @@ VARP _EltwiseSumInt8(VARP x, VARP y,
output_weight, output_bias, output_scale, output_tensorScale);
}

VARP _EltwiseSubInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale)
@@ -1241,7 +1241,7 @@ VARP _EltwiseSubInt8(VARP x, VARP y,
output_weight, output_bias, output_scale, output_tensorScale);
}

VARP _EltwiseMaxInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale)
@@ -1320,5 +1320,20 @@ VARP _Histogram(VARP x, int bin, int min, int max, int channel) {
return (Variable::create(Expr::create(std::move(op), {x})));
}

#ifdef MNN_BUILD_AUDIO
VARP _Stft(VARP sample, VARP window, int n_fft, int hop_length, bool abs) {
std::unique_ptr<OpT> op(new OpT);
op->type = OpType_Stft;
op->main.type = OpParameter_StftParam;
auto param = new StftParamT;
param->n_fft = n_fft;
param->hop_length = hop_length;
param->abs = abs;
op->main.value = param;
EXPRP expr = Expr::create(std::move(op), {sample, window});
return Variable::create(expr);
}
#endif

} // namespace Express
} // namespace MNN
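For reference, a short usage sketch for the new `_Stft` expression (available only when MNN is built with `MNN_BUILD_AUDIO`); the window shape and how it is filled are illustrative assumptions:

```cpp
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/ExprCreator.hpp>

using namespace MNN::Express;

// Magnitude spectrogram of a 1-D sample signal: 512-point FFT, hop of 160.
VARP computeSpectrogram(VARP sample) {
    VARP window = _Input({400}, NCHW, halide_type_of<float>());
    auto ptr = window->writeMap<float>();
    // ... fill `ptr` with 400 window coefficients (e.g. a Hann window) ...
    return _Stft(sample, window, /*n_fft=*/512, /*hop_length=*/160, /*abs=*/true);
}
```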
