Unverified commit a22633e9, authored by openharmony_ci, committed by Gitee

Revert 'Pull Request !6553: NNRt northbound interface test cases for the AI subsystem'

Parent 44e50725
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
group("ai") {
testonly = true
deps = [ "neural_network_runtime:neural_network_runtime" ]
}
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
group("neural_network_runtime") {
testonly = true
deps = [
"interface:ActsAiNnrtFunctionTest",
"stability:ActsAiNnrtStabilityTest",
]
}
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONST_H
#define CONST_H
#include <string>
#include <vector>
namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Test {
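// Shared constants for the NNRt ACTS suites: tensor sizes and index lists for
// the test graphs, model-cache file paths, and the iteration counts used by
// the stability (stress) tests.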
const uint32_t ADD_DATA_LENGTH = 4 * sizeof(float);
const uint32_t AVG_INPUT_LENGTH = 9 * sizeof(float);
const std::vector<int32_t> TENSOR_SHAPE = {1, 2, 2, 1};
const std::vector<int32_t> PARAM_INDEX = {2};
const std::vector<int32_t> INPUT_INDEX = {0, 1};
const std::vector<int32_t> OUTPUT_INDEX = {3};
const int32_t ELEMENT_COUNT = 4;
const std::string CACHE_DIR = "./cache";
const std::string CACHE_PATH = CACHE_DIR + "/0.nncache";
const std::string CACHE_INFO_PATH = CACHE_DIR + "/cache_info.nncache";
const uint32_t NO_DEVICE_COUNT = 0;
const int STRESS_COUNT = 100000;
const int PRINT_FREQ = 500;
} // namespace Test
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // CONST_H
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "const.h"
#include "mock_idevice.h"
namespace OHOS {
namespace HDI {
namespace Nnrt {
namespace V1_0 {
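// Test-only override of the HDI device factory: instead of resolving a real
// driver service, both INnrtDevice::Get() overloads hand back the
// process-local MockIDevice singleton, so the NNRt framework under test
// talks to the mock.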
sptr<INnrtDevice> INnrtDevice::Get(bool isStub)
{
    return INnrtDevice::Get("mock_device_service", isStub);
}

sptr<INnrtDevice> INnrtDevice::Get(const std::string &serviceName, bool isStub)
{
    if (isStub) {
        return nullptr;
    }
    sptr<INnrtDevice> mockIDevice = sptr<MockIDevice>(MockIDevice::GetInstance());
    return mockIDevice;
}
MockIDevice::~MockIDevice()
{
    for (auto ash : m_ashmems) {
        ash.second->UnmapAshmem();
        ash.second->CloseAshmem();
    }
}

MockIDevice *MockIDevice::GetInstance()
{
    static MockIDevice iDevice;
    return &iDevice;
}

void MockIDevice::SetFP16Supported(bool isSupported)
{
    m_fp16 = isSupported;
}

void MockIDevice::SetPerformanceSupported(bool isSupported)
{
    m_performance = isSupported;
}

void MockIDevice::SetPrioritySupported(bool isSupported)
{
    m_priority = isSupported;
}

void MockIDevice::SetModelCacheSupported(bool isSupported)
{
    m_cache = isSupported;
}

void MockIDevice::SetOperationsSupported(std::vector<bool> isSupported)
{
    m_operations = isSupported;
}

void MockIDevice::SetDynamicInputSupported(bool isSupported)
{
    m_dynamic = isSupported;
}

int32_t MockIDevice::GetDeviceName(std::string& name)
{
    name = "Device-CPU";
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetVendorName(std::string& name)
{
    name = "TestVendor";
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetDeviceType(DeviceType& deviceType)
{
    deviceType = DeviceType::CPU;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status)
{
    status = DeviceStatus::AVAILABLE;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
{
    majorVersion = 1;
    minorVersion = 0;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
{
    ops = m_operations;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
{
    isSupported = m_fp16;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
{
    isSupported = m_performance;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
{
    isSupported = m_priority;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
{
    isSupported = m_dynamic;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
{
    isSupported = m_cache;
    return HDF_SUCCESS;
}
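
// Backs AllocateBuffer() with a real ashmem region so the framework can map
// and fill it; the fd of the most recent allocation is remembered in
// m_bufferFd for MemoryCopy() below.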
int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
{
    sptr<Ashmem> ashptr = Ashmem::CreateAshmem("allocateBuffer", length);
    if (ashptr == nullptr) {
        LOGE("[NNRtTest] Create shared memory failed.");
        return HDF_FAILURE;
    }
    if (!ashptr->MapReadAndWriteAshmem()) {
        LOGE("[NNRtTest] Map allocate buffer failed.");
        return HDF_FAILURE;
    }
    buffer.fd = ashptr->GetAshmemFd();
    buffer.bufferSize = ashptr->GetAshmemSize();
    buffer.offset = 0;
    buffer.dataSize = length;
    m_ashmems[buffer.fd] = ashptr;
    m_bufferFd = buffer.fd;
    return HDF_SUCCESS;
}

int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
{
    auto ash = m_ashmems[buffer.fd];
    ash->UnmapAshmem();
    return HDF_SUCCESS;
}
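
// Test hook, not part of the HDI interface: maps the most recently allocated
// shared buffer and copies the expected output into it, so a mocked
// "inference" appears to produce the values the test case anticipates.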
int32_t MockIDevice::MemoryCopy(float* data, uint32_t length)
{
    auto memManager = NeuralNetworkRuntime::MemoryManager::GetInstance();
    auto memAddress = memManager->MapMemory(m_bufferFd, length);
    if (memAddress == nullptr) {
        LOGE("[NNRtTest] Map fd to address failed.");
        return HDF_FAILURE;
    }
    auto ret = memcpy_s(memAddress, length, data, length);
    if (ret != EOK) {
        LOGE("[NNRtTest] MockIDevice memory copy failed.");
        return HDF_FAILURE;
    }
    return HDF_SUCCESS;
}
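
// Model preparation is stubbed out: both paths ignore the model contents and
// simply hand back a MockIPreparedModel.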
int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr<IPreparedModel>& preparedModel)
{
    preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
    if (preparedModel == nullptr) {
        LOGE("[NNRtTest] Create mock prepared model failed.");
        return HDF_ERR_MALLOC_FAIL;
    }
    return HDF_SUCCESS;
}

int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
    sptr<IPreparedModel>& preparedModel)
{
    preparedModel = new (std::nothrow) V1_0::MockIPreparedModel();
    if (preparedModel == nullptr) {
        LOGE("[NNRtTest] Create mock prepared model failed.");
        return HDF_ERR_MALLOC_FAIL;
    }
    return HDF_SUCCESS;
}
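
// Exports a fixed 4-byte pattern as the "model cache" via ashmem; enough for
// the cache-related framework paths to be exercised without a real compiler.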
int32_t MockIPreparedModel::ExportModelCache(std::vector<SharedBuffer>& modelCache)
{
    if (!modelCache.empty()) {
        LOGE("[NNRtTest] The parameters of ExportModelCache should be an empty vector.");
        return HDF_ERR_INVALID_PARAM;
    }
    uint8_t buffer[4] = {0, 1, 2, 3};
    uint32_t size = sizeof(buffer);
    sptr<Ashmem> cache = Ashmem::CreateAshmem("cache", size);
    if (cache == nullptr) {
        LOGE("[NNRtTest] Create shared memory failed.");
        return HDF_ERR_MALLOC_FAIL;
    }
    bool ret = cache->MapReadAndWriteAshmem();
    if (!ret) {
        LOGE("[NNRtTest] Map fd to write cache failed.");
        return HDF_FAILURE;
    }
    ret = cache->WriteToAshmem(buffer, size, 0);
    cache->UnmapAshmem();
    if (!ret) {
        LOGE("[NNRtTest] Write cache failed.");
        return HDF_FAILURE;
    }
    // SharedBuffer: fd, bufferSize, offset, dataSize
    modelCache.emplace_back(SharedBuffer {cache->GetAshmemFd(), cache->GetAshmemSize(), 0, cache->GetAshmemSize()});
    return HDF_SUCCESS;
}

int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
{
    majorVersion = 1;
    minorVersion = 0;
    return HDF_SUCCESS;
}

int32_t MockIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
    std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough)
{
    outputsDims = {{1, 2, 2, 1}};
    isOutputBufferEnough = {true};
    return HDF_SUCCESS;
}
} // namespace V1_0
} // namespace Nnrt
} // namespace HDI
} // namespace OHOS
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MOCK_IDEVICE_H
#define MOCK_IDEVICE_H
#include <iostream>
#include <unordered_map>
#include <hdi_support.h>
#include <string_ex.h>
#include <hdf_base.h>
#include <gtest/gtest.h>
#include <gmock/gmock.h>
#include "mindir_lite_graph.h"
#include "mindir.h"
#include "securec.h"
#include "refbase.h"
#include "common/log.h"
#include "frameworks/native/hdi_interfaces.h"
#include "frameworks/native/memory_manager.h"
#include "ashmem.h"
namespace OHOS {
namespace HDI {
namespace Nnrt {
namespace V1_0 {
class MockIDevice : public INnrtDevice {
public:
    int32_t GetSupportedOperation(const Model& model, std::vector<bool>& ops) override;
    int32_t IsFloat16PrecisionSupported(bool& isSupported) override;
    int32_t IsPerformanceModeSupported(bool& isSupported) override;
    int32_t IsPrioritySupported(bool& isSupported) override;
    int32_t IsDynamicInputSupported(bool& isSupported) override;
    int32_t IsModelCacheSupported(bool& isSupported) override;
    int32_t AllocateBuffer(uint32_t length, SharedBuffer &buffer) override;
    int32_t ReleaseBuffer(const SharedBuffer &buffer) override;
    int32_t GetDeviceName(std::string& name) override;
    int32_t GetVendorName(std::string& name) override;
    int32_t GetDeviceType(DeviceType& deviceType) override;
    int32_t GetDeviceStatus(DeviceStatus& status) override;
    int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
    int32_t PrepareModel(const Model& model, const ModelConfig& config, sptr<IPreparedModel>& preparedModel) override;
    int32_t PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
        sptr<IPreparedModel>& preparedModel) override;
    int32_t MemoryCopy(float* data, uint32_t length);
    void SetFP16Supported(bool isSupported);
    void SetPerformanceSupported(bool isSupported);
    void SetPrioritySupported(bool isSupported);
    void SetModelCacheSupported(bool isSupported);
    void SetOperationsSupported(std::vector<bool> isSupported);
    void SetDynamicInputSupported(bool isSupported);
    static MockIDevice *GetInstance();
    MockIDevice() = default;
    virtual ~MockIDevice();

private:
    std::unordered_map<int, sptr<Ashmem>> m_ashmems;
    int m_bufferFd = -1;
    bool m_fp16 = true;
    bool m_performance = true;
    bool m_priority = true;
    bool m_cache = true;
    bool m_dynamic = true;
    std::vector<bool> m_operations{true};
};

class MockIPreparedModel : public IPreparedModel {
public:
    int32_t ExportModelCache(std::vector<SharedBuffer>& modelCache) override;
    int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
        std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
    int32_t GetVersion(uint32_t &majorVersion, uint32_t &minorVersion) override;
    MockIPreparedModel() = default;
};
} // namespace V1_0
} // namespace Nnrt
} // namespace HDI
} // namespace OHOS
#endif // MOCK_IDEVICE_H
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MODEL_H
#define MODEL_H
#include "const.h"
#include "nnrt_utils.h"
namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Test {
struct AddModel {
    // ADD MODEL
    float inputValue0[4] = {0, 1, 2, 3};
    float inputValue1[4] = {0, 1, 2, 3};
    int8_t activationValue = OH_NN_FUSED_NONE;
    float outputValue[4] = {0};
    float expectValue[4] = {0, 2, 4, 6};
    OHNNOperandTest input0 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue0, ADD_DATA_LENGTH};
    OHNNOperandTest input1 = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, inputValue1, ADD_DATA_LENGTH};
    OHNNOperandTest activation = {OH_NN_INT8, OH_NN_ADD_ACTIVATIONTYPE, {}, &activationValue, sizeof(int8_t)};
    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, TENSOR_SHAPE, outputValue, ADD_DATA_LENGTH};
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD,
                               .operands = {input0, input1, activation, output},
                               .paramIndices = {2},
                               .inputIndices = {0, 1},
                               .outputIndices = {3}};
};

struct AvgPoolDynamicModel {
    // AVG POOL MODEL
    float inputValue[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
    uint64_t kernelValue[2] = {2, 2};
    uint64_t strideValue[2] = {1, 1};
    int8_t padValue = 1;
    int8_t activationValue = OH_NN_FUSED_NONE;
    float outputValue[4] = {0};
    float expectValue[4] = {2, 3, 5, 6};
    OHNNOperandTest dynamicInput = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, inputValue, AVG_INPUT_LENGTH};
    OHNNOperandTest kernel = {OH_NN_INT64, OH_NN_AVG_POOL_KERNEL_SIZE, {2}, kernelValue, sizeof(kernelValue)};
    OHNNOperandTest strides = {OH_NN_INT64, OH_NN_AVG_POOL_STRIDE, {2}, strideValue, sizeof(strideValue)};
    OHNNOperandTest padMode = {OH_NN_INT8, OH_NN_AVG_POOL_PAD_MODE, {}, &padValue, sizeof(padValue)};
    OHNNOperandTest activation = {OH_NN_INT8, OH_NN_AVG_POOL_ACTIVATION_TYPE, {}, &activationValue, sizeof(int8_t)};
    OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {-1, -1, -1, -1}, outputValue, sizeof(outputValue)};
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_AVG_POOL,
                               .operands = {dynamicInput, kernel, strides, padMode, activation, output},
                               .paramIndices = {1, 2, 3, 4},
                               .inputIndices = {0},
                               .outputIndices = {5}};
};

struct TopKModel {
    // TopK Model
    float valueX[6] = {0, 1, 2, 3, 4, 5};
    int8_t valueK = 2;
    bool valueSorted = true;
    float valueOutput1[2];
    int32_t valueOutput2[2];
    OHNNOperandTest x = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 6}, valueX, 6 * sizeof(float)};
    OHNNOperandTest k = {OH_NN_INT8, OH_NN_TENSOR, {}, &valueK, sizeof(int8_t)};
    OHNNOperandTest sorted = {OH_NN_BOOL, OH_NN_TOP_K_SORTED, {}, &valueSorted, sizeof(bool)};
    OHNNOperandTest output1 = {OH_NN_FLOAT32, OH_NN_TENSOR, {1, 2}, valueOutput1, 2 * sizeof(float)};
    OHNNOperandTest output2 = {OH_NN_INT32, OH_NN_TENSOR, {1, 2}, valueOutput2, 2 * sizeof(int32_t)};
    OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_TOP_K,
                               .operands = {x, k, sorted, output1, output2},
                               .paramIndices = {2},
                               .inputIndices = {0, 1},
                               .outputIndices = {3, 4}};
};

class AddTopKModel {
    // Builds a model with two operations: Add feeding TopK
private:
    AddModel addModel;
    TopKModel topKModel;

public:
    OHNNGraphArgsMulti graphArgs = {
        .operationTypes = {OH_NN_OPS_ADD, OH_NN_OPS_TOP_K},
        .operands = {{addModel.input0, addModel.input1, addModel.activation, addModel.output},
                     {topKModel.k, topKModel.sorted, topKModel.output1, topKModel.output2}},
        .paramIndices = {{2}, {5}},
        .inputIndices = {{0, 1}, {3, 4}},
        .outputIndices = {{3}, {6, 7}},
        .graphInput = {0, 1, 4},
        .graphOutput = {6, 7}};
};
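
// Typical flow (sketch): a test instantiates one of the structs above, passes
// its graphArgs to BuildSingleOpGraph()/BuildMultiOpGraph(), compiles with
// CompileGraphMock(), runs via ExecuteGraphMock(), and finally compares
// outputValue against expectValue with CheckOutput().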
} // namespace Test
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // MODEL_H
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnrt_utils.h"
#include "const.h"
#include <fstream>
namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Test {
OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t>& vector)
{
    uint32_t* data = (vector.empty()) ? nullptr : const_cast<uint32_t*>(vector.data());
    return {data, vector.size()};
}
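
// Builds a multi-operator model: adds every operand of every operation, sets
// the data of parameter tensors, wires each operation to its indices, and
// finally declares the graph-level inputs/outputs before OH_NNModel_Finish().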
int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs)
{
    int ret = 0;
    int opCnt = 0;
    for (size_t j = 0; j < graphArgs.operationTypes.size(); j++) {
        for (size_t i = 0; i < graphArgs.operands[j].size(); i++) {
            const OHNNOperandTest &operandTem = graphArgs.operands[j][i];
            auto quantParam = operandTem.quantParam;
            OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
                                    operandTem.shape.data(), quantParam, operandTem.type};
            ret = OH_NNModel_AddTensor(model, &operand);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
                return ret;
            }
            if (std::find(graphArgs.paramIndices[j].begin(), graphArgs.paramIndices[j].end(), opCnt) !=
                graphArgs.paramIndices[j].end()) {
                ret = OH_NNModel_SetTensorData(model, opCnt, operandTem.data, operandTem.length);
                if (ret != OH_NN_SUCCESS) {
                    LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret);
                    return ret;
                }
            }
            opCnt += 1;
        }
        auto paramIndices = TransformUInt32Array(graphArgs.paramIndices[j]);
        auto inputIndices = TransformUInt32Array(graphArgs.inputIndices[j]);
        auto outputIndices = TransformUInt32Array(graphArgs.outputIndices[j]);
        ret = OH_NNModel_AddOperation(model, graphArgs.operationTypes[j], &paramIndices, &inputIndices,
                                      &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
            return ret;
        }
    }
    auto graphInputs = TransformUInt32Array(graphArgs.graphInput);
    auto graphOutputs = TransformUInt32Array(graphArgs.graphOutput);
    ret = OH_NNModel_SpecifyInputsAndOutputs(model, &graphInputs, &graphOutputs);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret);
        return ret;
    }
    ret = OH_NNModel_Finish(model);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret);
        return ret;
    }
    return ret;
}
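
// Single-operator variant of the builder above; the build/specifyIO/
// addOperation flags in OHNNGraphArgs let negative tests stop the sequence
// after any intermediate step.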
int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs)
{
    int ret = 0;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operandTem = graphArgs.operands[i];
        auto quantParam = operandTem.quantParam;
        OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
                                operandTem.shape.data(), quantParam, operandTem.type};
        ret = OH_NNModel_AddTensor(model, &operand);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddTensor failed! ret=%d\n", ret);
            return ret;
        }
        if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) !=
            graphArgs.paramIndices.end()) {
            ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNModel_SetTensorData failed! ret=%d\n", ret);
                return ret;
            }
        }
    }
    auto paramIndices = TransformUInt32Array(graphArgs.paramIndices);
    auto inputIndices = TransformUInt32Array(graphArgs.inputIndices);
    auto outputIndices = TransformUInt32Array(graphArgs.outputIndices);
    if (graphArgs.addOperation) {
        ret = OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices,
                                      &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_AddOperation failed! ret=%d\n", ret);
            return ret;
        }
    }
    if (graphArgs.specifyIO) {
        ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%d\n", ret);
            return ret;
        }
    }
    if (graphArgs.build) {
        ret = OH_NNModel_Finish(model);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNModel_Finish failed! ret=%d\n", ret);
            return ret;
        }
    }
    return ret;
}
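
// Queries all device IDs and binds the compilation to the first one; the
// mock registers exactly one device in these tests.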
int SetDevice(OH_NNCompilation *compilation)
{
    int ret = 0;
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ret = OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNDevice_GetAllDevicesID failed! ret=%d\n", ret);
        return ret;
    }
    if (devicesCount <= NO_DEVICE_COUNT) {
        return OH_NN_FAILED;
    }
    size_t targetDevice = devicesID[0]; // Use the first device in system test.
    ret = OH_NNCompilation_SetDevice(compilation, targetDevice);
    return ret;
}
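
// Drives the full compilation sequence: device selection, then the optional
// cache/performance/priority/fp16 settings from OHNNCompileParam, and
// finally OH_NNCompilation_Build().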
int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam)
{
    int ret = 0;
    ret = SetDevice(compilation);
    if (ret != OH_NN_SUCCESS) {
        LOGE("[NNRtTest] OH_NNCompilation_SetDevice failed! ret=%d\n", ret);
        return ret;
    }
    // set cache
    if (!compileParam.cacheDir.empty()) {
        ret = OH_NNCompilation_SetCache(compilation, compileParam.cacheDir.c_str(),
            compileParam.cacheVersion);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetCache failed! ret=%d\n", ret);
            return ret;
        }
    }
    // set performance
    if (compileParam.performanceMode != OH_NN_PERFORMANCE_NONE) {
        ret = OH_NNCompilation_SetPerformanceMode(compilation, compileParam.performanceMode);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetPerformanceMode failed! ret=%d\n", ret);
            return ret;
        }
    }
    // set priority
    if (compileParam.priority != OH_NN_PRIORITY_NONE) {
        ret = OH_NNCompilation_SetPriority(compilation, compileParam.priority);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_SetPriority failed! ret=%d\n", ret);
            return ret;
        }
    }
    // enable fp16
    if (compileParam.enableFp16) {
        ret = OH_NNCompilation_EnableFloat16(compilation, compileParam.enableFp16);
        if (ret != OH_NN_SUCCESS) {
            LOGE("[NNRtTest] OH_NNCompilation_EnableFloat16 failed! ret=%d\n", ret);
            return ret;
        }
    }
    // build
    ret = OH_NNCompilation_Build(compilation);
    return ret;
}
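
// Feeds inputs and registers outputs on the executor, and pre-loads the
// expected result into the mock device's shared buffer so OH_NNExecutor_Run()
// appears to compute it.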
int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect)
{
    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
    int ret = 0;
    uint32_t inputIndex = 0;
    uint32_t outputIndex = 0;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operandTem = graphArgs.operands[i];
        auto quantParam = operandTem.quantParam;
        OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
                                operandTem.shape.data(), quantParam, operandTem.type};
        if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
            graphArgs.inputIndices.end()) {
            ret = OH_NNExecutor_SetInput(executor, inputIndex, &operand, operandTem.data,
                operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNExecutor_SetInput failed! ret=%d\n", ret);
                return ret;
            }
            inputIndex += 1;
        } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
                   graphArgs.outputIndices.end()) {
            ret = OH_NNExecutor_SetOutput(executor, outputIndex, operandTem.data, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNExecutor_SetOutput failed! ret=%d\n", ret);
                return ret;
            }
            ret = device->MemoryCopy(expect, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
                return ret;
            }
            outputIndex += 1;
        }
    }
    ret = OH_NNExecutor_Run(executor);
    return ret;
}
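
// Same flow as ExecuteGraphMock(), but exchanges data through OH_NN_Memory
// regions allocated by the executor; the created handles are returned via
// OHNNMemory so the caller can release them.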
int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
    float* expect)
{
    OHOS::sptr<V1_0::MockIDevice> device = V1_0::MockIDevice::GetInstance();
    int ret = 0;
    uint32_t inputIndex = 0;
    uint32_t outputIndex = 0;
    for (size_t i = 0; i < graphArgs.operands.size(); i++) {
        const OHNNOperandTest &operandTem = graphArgs.operands[i];
        auto quantParam = operandTem.quantParam;
        OH_NN_Tensor operand = {operandTem.dataType, (uint32_t) operandTem.shape.size(),
                                operandTem.shape.data(), quantParam, operandTem.type};
        if (std::find(graphArgs.inputIndices.begin(), graphArgs.inputIndices.end(), i) !=
            graphArgs.inputIndices.end()) {
            OH_NN_Memory *inputMemory = OH_NNExecutor_AllocateInputMemory(executor, inputIndex,
                operandTem.length);
            ret = OH_NNExecutor_SetInputWithMemory(executor, inputIndex, &operand, inputMemory);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNExecutor_SetInputWithMemory failed! ret=%d\n", ret);
                return ret;
            }
            if (memcpy_s(inputMemory->data, operandTem.length, (void *) operandTem.data,
                operandTem.length) != EOK) {
                LOGE("[NNRtTest] Copy input data to shared memory failed!\n");
                return OH_NN_FAILED;
            }
            OHNNMemory[inputIndex] = inputMemory;
            inputIndex += 1;
        } else if (std::find(graphArgs.outputIndices.begin(), graphArgs.outputIndices.end(), i) !=
                   graphArgs.outputIndices.end()) {
            OH_NN_Memory *outputMemory = OH_NNExecutor_AllocateOutputMemory(executor, outputIndex,
                operandTem.length);
            ret = OH_NNExecutor_SetOutputWithMemory(executor, outputIndex, outputMemory);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] OH_NNExecutor_SetOutputWithMemory failed! ret=%d\n", ret);
                return ret;
            }
            ret = device->MemoryCopy(expect, operandTem.length);
            if (ret != OH_NN_SUCCESS) {
                LOGE("[NNRtTest] device set expect output failed! ret=%d\n", ret);
                return ret;
            }
            OHNNMemory[inputIndex + outputIndex] = outputMemory;
            outputIndex += 1;
        }
    }
    ret = OH_NNExecutor_Run(executor);
    return ret;
}
void Free(OH_NNModel *model, OH_NNCompilation *compilation, OH_NNExecutor *executor)
{
    if (model != nullptr) {
        OH_NNModel_Destroy(&model);
        ASSERT_EQ(nullptr, model);
    }
    if (compilation != nullptr) {
        OH_NNCompilation_Destroy(&compilation);
        ASSERT_EQ(nullptr, compilation);
    }
    if (executor != nullptr) {
        OH_NNExecutor_Destroy(&executor);
        ASSERT_EQ(nullptr, executor);
    }
}
PathType CheckPath(const std::string &path)
{
    if (path.empty()) {
        LOGI("CheckPath: path is null");
        return PathType::NOT_FOUND;
    }
    struct stat buf{};
    if (stat(path.c_str(), &buf) == 0) {
        if (buf.st_mode & S_IFDIR) {
            return PathType::DIR;
        } else if (buf.st_mode & S_IFREG) {
            return PathType::FILE;
        } else {
            return PathType::UNKNOWN;
        }
    }
    LOGI("%s not found", path.c_str());
    return PathType::NOT_FOUND;
}
bool DeleteFile(const std::string &path)
{
    if (path.empty()) {
        LOGI("DeleteFile: path is null");
        return false;
    }
    if (CheckPath(path) == PathType::NOT_FOUND) {
        LOGI("not found: %s", path.c_str());
        return true;
    }
    if (remove(path.c_str()) == 0) {
        LOGI("deleted: %s", path.c_str());
        return true;
    }
    LOGI("delete failed: %s", path.c_str());
    return false;
}
void CopyFile(const std::string &srcPath, const std::string &dstPath)
{
    std::ifstream src(srcPath, std::ios::binary);
    std::ofstream dst(dstPath, std::ios::binary);
    dst << src.rdbuf();
}
std::string ConcatPath(const std::string &str1, const std::string &str2)
{
    // boundary
    if (str2.empty()) {
        return str1;
    }
    if (str1.empty()) {
        return str2;
    }
    // concat
    char end = str1[str1.size() - 1];
    if (end == '\\' || end == '/') {
        return str1 + str2;
    } else {
        return str1 + '/' + str2;
    }
}
void DeleteFolder(const std::string &path)
{
    if (path.empty()) {
        LOGI("DeleteFolder: path is null");
        return;
    }
    DIR *dir = opendir(path.c_str());
    // null when the path is not a directory or cannot be opened
    if (dir == nullptr) {
        LOGE("[NNRtTest] Can not open dir. Check path or permission! path: %s", path.c_str());
        return;
    }
    struct dirent *file;
    // read all the files in dir
    std::vector<std::string> pathList;
    while ((file = readdir(dir)) != nullptr) {
        // skip "." and ".."
        if (strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0) {
            continue;
        }
        if (file->d_type == DT_DIR) {
            std::string filePath = path + "/" + file->d_name;
            DeleteFolder(filePath); // recurse into subdirectories
        } else {
            pathList.emplace_back(ConcatPath(path, file->d_name));
        }
    }
    closedir(dir);
    pathList.emplace_back(path);
    LOGI("[Common] Delete folder %s", path.c_str());
    for (auto &i : pathList) {
        DeleteFile(i);
    }
}
bool CreateFolder(const std::string &path)
{
    if (path.empty()) {
        LOGI("CreateFolder: path is empty");
        return false;
    }
    LOGI("CreateFolder:%s", path.c_str());
    mode_t mode = 0700;
    for (size_t i = 1; i < path.size() - 1; i++) {
        if (path[i] != '/') {
            continue;
        }
        PathType ret = CheckPath(path.substr(0, i));
        switch (ret) {
            case PathType::DIR:
                continue;
            case PathType::NOT_FOUND:
                LOGI("mkdir: %s", path.substr(0, i).c_str());
                mkdir(path.substr(0, i).c_str(), mode);
                break;
            default:
                LOGI("error: %s", path.substr(0, i).c_str());
                return false;
        }
    }
    mkdir(path.c_str(), mode);
    return CheckPath(path) == PathType::DIR;
}
bool CheckOutput(const float* output, const float* expect)
{
    if (output == nullptr || expect == nullptr) {
        LOGE("[NNRtTest] output or expect is nullptr\n");
        return false;
    }
    for (int i = 0; i < ELEMENT_COUNT; i++) {
        if (std::abs(float(output[i]) - float(expect[i])) > 1e-8) {
            for (int j = 0; j < ELEMENT_COUNT; j++) {
                LOGE("[NNRtTest] output %d not match: expect:%f, actual:%f\n", j, float(expect[j]), float(output[j]));
            }
            return false;
        }
    }
    return true;
}
} // namespace Test
} // namespace NeuralNetworkRuntime
} // namespace OHOS
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NNRT_UTILS_H
#define NNRT_UTILS_H
#include <dirent.h>
#include <sys/stat.h>
#include <gtest/gtest.h>
#include "interfaces/kits/c/neural_network_runtime.h"
#include "common/log.h"
#include "mock_idevice.h"
#include "const.h"
namespace OHOS {
namespace NeuralNetworkRuntime {
namespace Test {
namespace V1_0 = OHOS::HDI::Nnrt::V1_0;
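
// Test-side description of a tensor: the fields mirror OH_NN_Tensor plus a
// pointer to the backing data used when setting tensor values or I/O.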
struct OHNNOperandTest {
    OH_NN_DataType dataType;
    OH_NN_TensorType type;
    std::vector<int32_t> shape;
    void *data{nullptr};
    int32_t length{0};
    const OH_NN_QuantParam *quantParam = nullptr;
};

struct OHNNGraphArgs {
    OH_NN_OperationType operationType;
    std::vector<OHNNOperandTest> operands;
    std::vector<uint32_t> paramIndices;
    std::vector<uint32_t> inputIndices;
    std::vector<uint32_t> outputIndices;
    bool build = true;
    bool specifyIO = true;
    bool addOperation = true;
};

struct OHNNGraphArgsMulti {
    std::vector<OH_NN_OperationType> operationTypes;
    std::vector<std::vector<OHNNOperandTest>> operands;
    std::vector<std::vector<uint32_t>> paramIndices;
    std::vector<std::vector<uint32_t>> inputIndices;
    std::vector<std::vector<uint32_t>> outputIndices;
    std::vector<uint32_t> graphInput;
    std::vector<uint32_t> graphOutput;
};

struct OHNNCompileParam {
    int32_t deviceId = 0;
    std::string cacheDir;
    uint32_t cacheVersion = 0;
    OH_NN_PerformanceMode performanceMode = OH_NN_PERFORMANCE_NONE;
    OH_NN_Priority priority = OH_NN_PRIORITY_NONE;
    bool enableFp16 = false;
};
int BuildSingleOpGraph(OH_NNModel *modelptr, const OHNNGraphArgs &args);
int ExecutorWithMemory(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, OH_NN_Memory *OHNNMemory[],
float* expect);
void Free(OH_NNModel *model = nullptr, OH_NNCompilation *compilation = nullptr, OH_NNExecutor *executor = nullptr);
int CompileGraphMock(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam);
int ExecuteGraphMock(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs, float* expect);
int SetDevice(OH_NNCompilation *compilation);
int BuildMultiOpGraph(OH_NNModel *model, const OHNNGraphArgsMulti &graphArgs);
OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t>& vector);
bool CheckOutput(const float* output, const float* expect);
enum class PathType { FILE, DIR, UNKNOWN, NOT_FOUND };
PathType CheckPath(const std::string &path);
bool DeleteFile(const std::string &path);
void CopyFile(const std::string &srcPath, const std::string &dstPath);
std::string ConcatPath(const std::string &str1, const std::string &str2);
void DeleteFolder(const std::string &path);
bool CreateFolder(const std::string &path);
} // namespace Test
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // NNRT_UTILS_H
# Copyright (c) 2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//test/xts/tools/build/suite.gni")
ohos_moduletest_suite("ActsAiNnrtFunctionTest") {
testonly = true
module_out_path = "acts/nnrt"
sources = [
"../common/mock_idevice.cpp",
"../common/nnrt_utils.cpp",
"src/CompileTest.cpp",
"src/DeviceTest.cpp",
"src/ExecutorTest.cpp",
"src/MemoryTest.cpp",
"src/ModelTest.cpp",
]
include_dirs = [
"../common",
"//foundation/ai/neural_network_runtime",
"//foundation/ai/neural_network_runtime/third_party/include",
"//third_party/googletest/googletest/include",
"//third_party/googletest/googlemock/include",
"//third_party/mindspore/mindspore/lite/mindir/include",
]
deps = [
"//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime",
"//third_party/googletest:gmock",
"//third_party/googletest:gtest",
]
external_deps = [
"c_utils:utils",
"drivers_interface_nnrt:libnnrt_proxy_1.0",
"hdf_core:libhdf_utils",
"hdf_core:libhdi",
"hilog_native:libhilog",
"hitrace_native:libhitracechain",
"ipc:ipc_single",
"mindspore:mindir",
]
cflags = [ "-Wno-error" ]
}
{
    "kits": [
        {
            "push": [
                "ActsAiNnrtFunctionTest->/data/local/tmp/ActsAiNnrtFunctionTest"
            ],
            "type": "PushKit"
        }
    ],
    "driver": {
        "native-test-timeout": "120000",
        "type": "CppTest",
        "module-name": "ActsAiNnrtFunctionTest",
        "runtime-hint": "1s",
        "native-test-device-path": "/data/local/tmp"
    },
    "description": "Configuration for ActsAiNnrtFunctionTest Tests"
}
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cstdio>
#include <vector>
#include "nnrt_utils.h"
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Test;
class DeviceTest : public testing::Test {};
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0100
 * @tc.name : Get device IDs when allDevicesID is nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0100, Function | MediumTest | Level3)
{
    uint32_t count{0};
    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(nullptr, &count);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0200
 * @tc.name : Get device IDs when *allDevicesID is not nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0200, Function | MediumTest | Level3)
{
    const size_t allDeviceIds = 0;
    const size_t *pAllDeviceIds = &allDeviceIds;
    uint32_t count{0};
    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&pAllDeviceIds, &count);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0300
 * @tc.name : Get device IDs when deviceCount is nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0300, Function | MediumTest | Level3)
{
    const size_t *allDeviceIds = nullptr;
    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceID_0400
 * @tc.name : Get device IDs and verify the device count
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceID_0400, Function | MediumTest | Level2)
{
    const size_t *allDeviceIds = nullptr;
    uint32_t count{0};
    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&allDeviceIds, &count);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
    uint32_t expectCount = 1;
    EXPECT_EQ(expectCount, count);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0100
 * @tc.name : Get device name with a nonexistent deviceID
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0100, Function | MediumTest | Level3)
{
    const size_t deviceID{100000};
    const char *name = nullptr;
    OH_NN_ReturnCode ret = OH_NNDevice_GetName(deviceID, &name);
    EXPECT_EQ(OH_NN_FAILED, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0200
 * @tc.name : Get device name when name is nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0200, Function | MediumTest | Level3)
{
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];
    OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0300
 * @tc.name : Get device name when *name is not nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0300, Function | MediumTest | Level3)
{
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];
    const char *name = "name";
    OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, &name);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceName_0400
 * @tc.name : Get device name and verify the result
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceName_0400, Function | MediumTest | Level1)
{
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];
    const char *name = nullptr;
    std::string m_deviceName{"RK3568-CPU_Rockchip"};
    OH_NN_ReturnCode ret = OH_NNDevice_GetName(targetDevice, &name);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
    std::string sName(name);
    EXPECT_EQ(m_deviceName, sName);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0100
 * @tc.name : Get device type when deviceType is nullptr
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0100, Function | MediumTest | Level3)
{
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];
    OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, nullptr);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0200
 * @tc.name : Get device type with a nonexistent deviceID
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0200, Function | MediumTest | Level3)
{
    const size_t deviceID{100000};
    OH_NN_DeviceType type{OH_NN_OTHERS};
    OH_NN_ReturnCode ret = OH_NNDevice_GetType(deviceID, &type);
    EXPECT_EQ(OH_NN_INVALID_PARAMETER, ret);
}
/**
 * @tc.number : SUB_AI_NNRt_Func_North_Device_DeviceType_0300
 * @tc.name : Get device type and verify the result
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_North_Device_DeviceType_0300, Function | MediumTest | Level1)
{
    const size_t *devicesID{nullptr};
    uint32_t devicesCount{0};
    ASSERT_EQ(OH_NN_SUCCESS, OH_NNDevice_GetAllDevicesID(&devicesID, &devicesCount));
    size_t targetDevice = devicesID[0];
    OH_NN_DeviceType type{OH_NN_OTHERS};
    OH_NN_ReturnCode ret = OH_NNDevice_GetType(targetDevice, &type);
    EXPECT_EQ(OH_NN_SUCCESS, ret);
}
# Copyright (c) 2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//test/xts/tools/build/suite.gni")
ohos_moduletest_suite("ActsAiNnrtStabilityTest") {
testonly = true
module_out_path = "acts/nnrt"
sources = [
"../common/mock_idevice.cpp",
"../common/nnrt_utils.cpp",
"src/MultiThreadTest.cpp",
]
include_dirs = [
"../common",
"//foundation/ai/neural_network_runtime",
"//foundation/ai/neural_network_runtime/third_party/include",
"//third_party/googletest/googletest/include",
"//third_party/googletest/googlemock/include",
"//third_party/mindspore/mindspore/lite/mindir/include",
]
deps = [
"//foundation/ai/neural_network_runtime/frameworks:libneural_network_runtime",
"//third_party/googletest:gmock",
"//third_party/googletest:gtest",
]
external_deps = [
"c_utils:utils",
"drivers_interface_nnrt:libnnrt_proxy_1.0",
"hdf_core:libhdf_utils",
"hdf_core:libhdi",
"hilog_native:libhilog",
"hitrace_native:libhitracechain",
"ipc:ipc_single",
"mindspore:mindir",
]
cflags = [ "-Wno-error" ]
}
{
    "kits": [
        {
            "push": [
                "ActsAiNnrtStabilityTest->/data/local/tmp/ActsAiNnrtStabilityTest"
            ],
            "type": "PushKit"
        }
    ],
    "driver": {
        "native-test-timeout": "120000",
        "type": "CppTest",
        "module-name": "ActsAiNnrtStabilityTest",
        "runtime-hint": "1s",
        "native-test-device-path": "/data/local/tmp"
    },
    "description": "Configuration for ActsAiNnrtStabilityTest Tests"
}
/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmath>
#include <cstdio>
#include <vector>
#include <thread>
#include "interfaces/kits/c/neural_network_runtime.h"
#include "nnrt_utils.h"
#include "model.h"
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;
using namespace OHOS::HDI::Nnrt::V1_0;
class MultiThreadTest : public testing::Test {
public:
    void SetUp() {}
    void TearDown() {}

protected:
    OHNNCompileParam compileParam;
    AddModel addModel;
    OHNNGraphArgs graphArgs = addModel.graphArgs;
};
void CompileModel(OH_NNCompilation *compilation, const OHNNCompileParam &compileParam)
{
    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation, compileParam));
}

void ExecuteModel(OH_NNExecutor *executor, const OHNNGraphArgs &graphArgs)
{
    float addExpectValue[4] = {0, 1, 2, 3};
    ASSERT_EQ(OH_NN_SUCCESS, ExecuteGraphMock(executor, graphArgs, addExpectValue));
}
/**
 * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0100
 * @tc.name : Long-running stress test of concurrent multi-threaded model compilation
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0100, Reliability | MediumTest | Level2)
{
    for (int i = 0; i < STRESS_COUNT; i++) {
        OH_NNModel *model1 = OH_NNModel_Construct();
        ASSERT_NE(nullptr, model1);
        ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs));
        OH_NNModel *model2 = OH_NNModel_Construct();
        ASSERT_NE(nullptr, model2);
        ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs));
        OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1);
        ASSERT_NE(nullptr, compilation1);
        OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2);
        ASSERT_NE(nullptr, compilation2);
        std::thread th1(CompileModel, compilation1, compileParam);
        std::thread th2(CompileModel, compilation2, compileParam);
        th1.join();
        th2.join();
        Free(model1, compilation1);
        Free(model2, compilation2);
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0100 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
}
/**
 * @tc.number : SUB_AI_NNR_Reliability_North_Stress_0200
 * @tc.name : Long-running stress test of concurrent multi-threaded model inference
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(MultiThreadTest, SUB_AI_NNR_Reliability_North_Stress_0200, Reliability | MediumTest | Level2)
{
    OH_NNModel *model1 = OH_NNModel_Construct();
    ASSERT_NE(nullptr, model1);
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model1, graphArgs));
    OH_NNModel *model2 = OH_NNModel_Construct();
    ASSERT_NE(nullptr, model2);
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(model2, graphArgs));
    OH_NNCompilation *compilation1 = OH_NNCompilation_Construct(model1);
    ASSERT_NE(nullptr, compilation1);
    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation1, compileParam));
    OH_NNCompilation *compilation2 = OH_NNCompilation_Construct(model2);
    ASSERT_NE(nullptr, compilation2);
    ASSERT_EQ(OH_NN_SUCCESS, CompileGraphMock(compilation2, compileParam));
    for (int i = 0; i < STRESS_COUNT; i++) {
        OH_NNExecutor *executor1 = OH_NNExecutor_Construct(compilation1);
        ASSERT_NE(nullptr, executor1);
        OH_NNExecutor *executor2 = OH_NNExecutor_Construct(compilation2);
        ASSERT_NE(nullptr, executor2);
        std::thread th1(ExecuteModel, executor1, graphArgs);
        std::thread th2(ExecuteModel, executor2, graphArgs);
        th1.join();
        th2.join();
        OH_NNExecutor_Destroy(&executor1);
        ASSERT_EQ(nullptr, executor1);
        OH_NNExecutor_Destroy(&executor2);
        ASSERT_EQ(nullptr, executor2);
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] SUB_AI_NNR_Reliability_North_Stress_0200 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
    Free(model1, compilation1);
    Free(model2, compilation2);
}
@@ -14,7 +14,6 @@
 import("//test/xts/tools/build/suite.gni")
 _all_test_packages = [
-  "${ACTS_ROOT}/ai:ai",
   "${ACTS_ROOT}/arkXtest:arkXtest",
   "${ACTS_ROOT}/global:global",
   "${ACTS_ROOT}/security:security",