Commit c8e38792 authored by R Raaj, committed by Gines

Python fix (#674)

* Bug fixes for Python Windows and Cmake
* Update to python doc
* Added BUILD_DLL
Parent 62eebd63
......@@ -235,9 +235,12 @@ option(DOWNLOAD_HAND_MODEL "Download hand model." ON)
option(BUILD_EXAMPLES "Build OpenPose examples." ON)
option(BUILD_DOCS "Build OpenPose documentation." OFF)
option(BUILD_PYTHON "Build OpenPose python." OFF)
if (WIN32)
option(BUILD_DLL "Copy all required DLL files into the same folder." ON)
endif ()
# Build as shared library
option(BUILD_SHARED_LIBS "Build as shared lib" ON)
option(BUILD_SHARED_LIBS "Build as shared lib." ON)
# Speed profiler
option(PROFILER_ENABLED "If enabled, OpenPose will be able to print out speed information at runtime." OFF)
......@@ -467,14 +470,18 @@ if (WIN32)
find_library(Caffe_Proto_LIB caffeproto HINTS ${FIND_LIB_PREFIX}/caffe/lib)
endif (${GPU_MODE} MATCHES "CPU_ONLY")
endif (${GPU_MODE} MATCHES "OPENCL")
if (${GPU_MODE} MATCHES "OPENCL")
unset(BOOST_SYSTEM_LIB_RELEASE CACHE)
unset(BOOST_SYSTEM_LIB_DEBUG CACHE)
find_library(BOOST_SYSTEM_LIB_RELEASE boost_system-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_SYSTEM_LIB_DEBUG boost_system-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
endif (${GPU_MODE} MATCHES "OPENCL")
# Boost libraries (needed for CPU_ONLY, OPENCL, or BUILD_PYTHON builds)
if (${GPU_MODE} MATCHES "CPU_ONLY" OR ${GPU_MODE} MATCHES "OPENCL" OR BUILD_PYTHON)
find_library(BOOST_SYSTEM_LIB_RELEASE libboost_system-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_SYSTEM_LIB_DEBUG libboost_system-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_FILESYSTEM_LIB_RELEASE libboost_filesystem-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_FILESYSTEM_LIB_DEBUG libboost_filesystem-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
else ()
set(BOOST_SYSTEM_LIB_RELEASE "")
set(BOOST_SYSTEM_LIB_DEBUG "")
set(BOOST_FILESYSTEM_LIB_RELEASE "")
set(BOOST_FILESYSTEM_LIB_DEBUG "")
endif ()
if (WITH_3D_RENDERER)
find_library(GLUT_LIBRARY freeglut HINTS ${FIND_LIB_PREFIX}/freeglut/lib)
message(STATUS "\${GLUT_LIBRARY} = ${GLUT_LIBRARY}")
......@@ -507,6 +514,32 @@ if (WIN32)
set(SPINNAKER_INCLUDE_DIRS "3rdparty/windows/spinnaker/include")
endif (WITH_FLIR_CAMERA)
set(Caffe_FOUND 1)
# BUILD_DLL must be ON if BUILD_PYTHON is ON
if (BUILD_PYTHON)
if (NOT BUILD_DLL)
message(FATAL_ERROR "BUILD_DLL must be turned on as well to build the python library")
endif ()
endif ()
# Auto copy DLLs
if (BUILD_DLL)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
# Auto copy DLLs
if (${GPU_MODE} MATCHES "CUDA")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe/bin/*.dll")
elseif (${GPU_MODE} MATCHES "OPENCL")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_opencl/bin/*.dll")
elseif (${GPU_MODE} MATCHES "CPU_ONLY")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_cpu/bin/*.dll")
endif ()
file(GLOB OPENCV_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/opencv/x64/vc14/bin/*.dll")
file(GLOB OPENCV3PTY_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe3rdparty/lib/*.dll")
file(COPY ${CAFFE_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
file(COPY ${OPENCV_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
file(COPY ${OPENCV3PTY_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
endif ()
endif (WIN32)
......@@ -739,11 +772,16 @@ if (USE_MKL)
endif (USE_MKL)
if (${GPU_MODE} MATCHES "OPENCL")
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${CMAKE_THREAD_LIBS_INIT} ${OpenCL_LIBRARIES})
if (WIN32)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_SYSTEM_LIB_DEBUG} optimized ${BOOST_SYSTEM_LIB_RELEASE})
endif (WIN32)
endif (${GPU_MODE} MATCHES "OPENCL")
# Boost
if (WIN32)
if (${GPU_MODE} MATCHES "CPU_ONLY" OR ${GPU_MODE} MATCHES "OPENCL" OR BUILD_PYTHON)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_SYSTEM_LIB_RELEASE} optimized ${BOOST_SYSTEM_LIB_RELEASE})
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_FILESYSTEM_LIB_RELEASE} optimized ${BOOST_FILESYSTEM_LIB_RELEASE})
endif ()
endif (WIN32)
# 3-D
if (WITH_3D_ADAM_MODEL)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
......
......@@ -9,14 +9,23 @@ OpenPose Python Module
## Introduction
This experimental module exposes a Python API for OpenPose. This allows you to construct an OpenPose object, pass in a numpy array for an image, and get a numpy array of the pose positions. This API also exposes an API that allows you to directly pass in heatmaps from a network and extract poses out of it.
This experimental module exposes a Python API for OpenPose. This allows you to construct an OpenPose object, pass in a numpy array for an image, and get a numpy array of the pose positions. It also exposes a lower-level API that allows you to directly pass in heatmaps from a network and extract poses from them (requires Python Caffe to be installed separately).
At present the Python API only supports body pose. Hands and Face will be added in the future.
## Installation
Check [doc/installation.md#python-module](./installation.md#python-module) for installation steps.
To simply test the OpenPose API in your project without installation, ensure that the line `sys.path.append('{OpenPose_path}/python')` is set in your *.py files, where `{OpenPose_path}` points to your OpenPose build folder. Take a look at `build/examples/tutorial_python/1_extract_pose.py` for an example.
On Ubuntu or OSX you may also use it globally: running `sudo make install` installs OpenPose into `/usr/local/python` by default. Add that directory to your Python path and you can use the module from any location.
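For a quick check that the path is set correctly, a minimal import sketch is shown below (it assumes the default `make install` prefix `/usr/local/python`; point `sys.path` at your OpenPose build folder instead if you did not install):

```python
# Minimal import sketch: make the OpenPose Python module visible and load it.
# '/usr/local/python' assumes the default `sudo make install` prefix; replace it
# with '{OpenPose_path}/python' from your build folder otherwise.
import sys
sys.path.append('/usr/local/python')
from openpose import *  # exposes the OpenPose wrapper class
```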
The Python API requires Numpy for array management, and OpenCV for image loading. They can be installed via:
```
pip install numpy
pip install opencv-python
```
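As a quick sanity check (a sketch only; `image.png` stands for any test image you have on disk), both dependencies should import and be able to load an image:

```python
# Sanity-check sketch: confirm numpy and OpenCV are importable and can read an image.
import numpy as np
import cv2

img = cv2.imread("image.png")  # returns a numpy array, or None if the file cannot be read
print(None if img is None else img.shape)  # e.g. (height, width, 3)
```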
## Compatibility
The OpenPose Python module is compatible with both Python 2 and Python 3. In addition, it runs on all OpenPose-compatible operating systems.
......@@ -26,56 +35,16 @@ The OpenPose Python module is compatible with both Python 2 and Python 3. In add
## Testing
Two examples can be found in `build/examples/tutorial_python` in your build folder. Navigate to this path to run them.
- `1_extract_pose` demonstrates a simple use of the API.
- `2_pose_from_heatmaps` demonstrates constructing pose from heatmaps from the caffe network.
- `1_extract_pose` demonstrates a simple use of the API.
- `2_pose_from_heatmaps` demonstrates constructing a pose from the heatmaps of the Caffe network (requires Python Caffe to be installed separately).
```
# From command line
cd build/examples/tutorial_python
python
python 1_extract_pose.py
```
```python
# From Python
# It requires OpenCV installed for Python
import cv2
import os
import sys
# Remember to add your installation path here
# Option a
sys.path.append('{OpenPose_path}/python')
# Option b
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import *
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x368"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 1
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
# If GPU version is built, and multiple GPUs are available, set the ID here
params["disable_blending"] = False
params["default_model_folder"] = "/home/user/openpose/models"
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
while 1:
# Read new image
img = cv2.imread("image.png")
# Output keypoints and the image with the human skeleton blended on it
keypoints, output_image = openpose.forward(img, True)
# Print the human pose keypoints, i.e., a [#people x #keypoints x 3]-dimensional numpy object with the keypoints of all the people on that image
print keypoints
# Display the image
cv2.imshow("output", output_image)
cv2.waitKey(15)
```
## Code Sample
See `examples/tutorial_python/1_extract_pose.py`.
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
# Remember to add your installation path here
# Option a
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append('../../python')
from openpose import *
if platform == "win32": sys.path.append(dir_path + '/../../python/openpose/');
else: sys.path.append('../../python');
# Option b
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
from openpose import *
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x368"
params["model_pose"] = "COCO"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 1
params["render_threshold"] = 0.05
# If GPU version is built, and multiple GPUs are available, set the ID here
params["num_gpu_start"] = 0
params["disable_blending"] = False
# Ensure you point to the correct path where models are located
params["default_model_folder"] = dir_path + "/../../../models/"
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
img = cv2.imread(dir_path + "/../../../examples/media/COCO_val2014_000000000192.jpg")
arr, output_image = openpose.forward(img, True)
print arr
while 1:
# Read new image
img = cv2.imread("image.png")
# Output keypoints and the image with the human skeleton blended on it
keypoints, output_image = openpose.forward(img, True)
# Print the human pose keypoints, i.e., a [#people x #keypoints x 3]-dimensional numpy object with the keypoints of all the people on that image
print(keypoints)
# Display the image
cv2.imshow("output", output_image)
cv2.waitKey(15)
from sys import platform
import sys
try:
import caffe
except ImportError:
print("This sample can only be run if Python Caffe is available on your system")
print("Currently OpenPose does not compile Python Caffe. This may be supported in the future")
sys.exit(-1)
import os
os.environ["GLOG_minloglevel"] = "1"
import caffe
......@@ -36,7 +45,7 @@ caffe.set_device(0)
nets = []
for scale in scales:
nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print "Net loaded"
print("Net loaded")
# Test Function
first_run = True
......@@ -57,7 +66,7 @@ def func(frame):
net.reshape()
first_run = False
print "Reshaped"
print("Reshaped")
# Forward pass to get heatmaps
heatmaps = []
......
......@@ -4,7 +4,7 @@ set(PYTHON_FILES
_openpose.cpp)
add_library(_openpose SHARED ${PYTHON_FILES})
target_link_libraries(_openpose openpose ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} ${Caffe_LIBS} ${MKL_LIBS} ${GLUT_LIBRARY} ${SPINNAKER_LIB} ${OpenCL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(_openpose openpose ${OpenPose_3rdparty_libraries})
SET_TARGET_PROPERTIES(_openpose PROPERTIES PREFIX "")
configure_file(openpose.py openpose.py)
configure_file(__init__.py __init__.py)
......
#ifndef OPENPOSE_PYTHON_HPP
#define OPENPOSE_PYTHON_HPP
#define BOOST_DATE_TIME_NO_LIB
// OpenPose dependencies
#include <openpose/core/headers.hpp>
......@@ -13,12 +14,18 @@
#include <openpose/net/nmsCaffe.hpp>
#include <openpose/net/resizeAndMergeCaffe.hpp>
#include <openpose/pose/bodyPartConnectorCaffe.hpp>
#include <boost/make_shared.hpp>
#include <openpose/pose/poseParameters.hpp>
#include <openpose/pose/enumClasses.hpp>
#include <openpose/pose/poseExtractor.hpp>
#include <openpose/gpu/cuda.hpp>
#include <openpose/gpu/opencl.hcl>
#include <openpose/core/macros.hpp>
#ifdef _WIN32
#define OP_EXPORT __declspec(dllexport)
#else
#define OP_EXPORT
#endif
#define default_logging_level 3
#define default_output_resolution "-1x-1"
......@@ -33,8 +40,7 @@
#define default_model_folder "models/"
// TODO: handle GPU number, OpenCL/CPU cases
class OpenPose{
OP_API class OpenPose {
public:
std::unique_ptr<op::PoseExtractorCaffe> poseExtractorCaffe;
std::unique_ptr<op::PoseCpuRenderer> poseRenderer;
......@@ -62,12 +68,12 @@ public:
int FLAGS_num_gpu_start = default_num_gpu_start,
int FLAGS_disable_blending = default_disable_blending,
std::string FLAGS_model_folder = default_model_folder
){
) {
mGpuID = FLAGS_num_gpu_start;
#ifdef USE_CUDA
#ifdef USE_CUDA
caffe::Caffe::set_mode(caffe::Caffe::GPU);
caffe::Caffe::SetDevice(mGpuID);
#elif USE_OPENCL
#elif USE_OPENCL
caffe::Caffe::set_mode(caffe::Caffe::GPU);
std::vector<int> devices;
const int maxNumberGpu = op::OpenCL::getTotalGPU();
......@@ -76,9 +82,9 @@ public:
caffe::Caffe::SetDevices(devices);
caffe::Caffe::SelectDevice(mGpuID, true);
op::OpenCL::getInstance(mGpuID, CL_DEVICE_TYPE_GPU, true);
#else
#else
caffe::Caffe::set_mode(caffe::Caffe::CPU);
#endif
#endif
op::log("OpenPose Library Python Wrapper", op::Priority::High);
// ------------------------- INITIALIZATION -------------------------
// Step 1 - Set logging level
......@@ -104,18 +110,18 @@ public:
// Step 3 - Initialize all required classes
scaleAndSizeExtractor = std::unique_ptr<op::ScaleAndSizeExtractor>(new op::ScaleAndSizeExtractor(netInputSize, outputSize, FLAGS_scale_number, FLAGS_scale_gap));
poseExtractorCaffe = std::unique_ptr<op::PoseExtractorCaffe>(new op::PoseExtractorCaffe{poseModel, FLAGS_model_folder, FLAGS_num_gpu_start});
poseExtractorCaffe = std::unique_ptr<op::PoseExtractorCaffe>(new op::PoseExtractorCaffe{ poseModel, FLAGS_model_folder, FLAGS_num_gpu_start });
poseRenderer = std::unique_ptr<op::PoseCpuRenderer>(new op::PoseCpuRenderer{poseModel, (float)FLAGS_render_threshold, !FLAGS_disable_blending,
(float)FLAGS_alpha_pose});
frameDisplayer = std::unique_ptr<op::FrameDisplayer>(new op::FrameDisplayer{"OpenPose Tutorial - Example 1", outputSize});
poseRenderer = std::unique_ptr<op::PoseCpuRenderer>(new op::PoseCpuRenderer{ poseModel, (float)FLAGS_render_threshold, !FLAGS_disable_blending,
(float)FLAGS_alpha_pose });
frameDisplayer = std::unique_ptr<op::FrameDisplayer>(new op::FrameDisplayer{ "OpenPose Tutorial - Example 1", outputSize });
// Custom
resizeAndMergeCaffe = std::unique_ptr<op::ResizeAndMergeCaffe<float>>(new op::ResizeAndMergeCaffe<float>{});
nmsCaffe = std::unique_ptr<op::NmsCaffe<float>>(new op::NmsCaffe<float>{});
bodyPartConnectorCaffe = std::unique_ptr<op::BodyPartConnectorCaffe<float>>(new op::BodyPartConnectorCaffe<float>{});
heatMapsBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
peaksBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
heatMapsBlob = { std::make_shared<caffe::Blob<float>>(1,1,1,1) };
peaksBlob = { std::make_shared<caffe::Blob<float>>(1,1,1,1) };
bodyPartConnectorCaffe->setPoseModel(poseModel);
// Step 4 - Initialize resources on desired thread (in this case single thread, i.e. we init resources here)
......@@ -130,24 +136,24 @@ public:
{
// Prepare spCaffeNetOutputBlobss
std::vector<caffe::Blob<float>*> caffeNetOutputBlobs(caffeNetOutputBlob.size());
for (auto i = 0u ; i < caffeNetOutputBlobs.size() ; i++)
for (auto i = 0u; i < caffeNetOutputBlobs.size(); i++)
caffeNetOutputBlobs[i] = caffeNetOutputBlob[i].get();
return caffeNetOutputBlobs;
}
catch (const std::exception& e)
{
op::error(e.what(), __LINE__, __FUNCTION__, __FILE__);
return {};
return{};
}
}
void forward(const cv::Mat& inputImage, op::Array<float>& poseKeypoints, cv::Mat& displayImage, bool display = false){
void forward(const cv::Mat& inputImage, op::Array<float>& poseKeypoints, cv::Mat& displayImage, bool display = false) {
op::OpOutputToCvMat opOutputToCvMat;
op::CvMatToOpInput cvMatToOpInput;
op::CvMatToOpOutput cvMatToOpOutput;
if(inputImage.empty())
if (inputImage.empty())
op::error("Could not open or find the image: ", __LINE__, __FUNCTION__, __FILE__);
const op::Point<int> imageSize{inputImage.cols, inputImage.rows};
const op::Point<int> imageSize{ inputImage.cols, inputImage.rows };
// Step 2 - Get desired scale sizes
std::vector<double> scaleInputToNetInputs;
std::vector<op::Point<int>> netInputSizes;
......@@ -162,7 +168,7 @@ public:
poseExtractorCaffe->forwardPass(netInputArray, imageSize, scaleInputToNetInputs);
poseKeypoints = poseExtractorCaffe->getPoseKeypoints();
if(display){
if (display) {
auto outputArray = cvMatToOpOutput.createArray(inputImage, scaleInputToOutput, outputResolution);
// Step 5 - Render poseKeypoints
poseRenderer->renderPose(outputArray, poseKeypoints, scaleInputToOutput);
......@@ -171,9 +177,9 @@ public:
}
}
void poseFromHeatmap(const cv::Mat& inputImage, std::vector<boost::shared_ptr<caffe::Blob<float>>>& caffeNetOutputBlob, op::Array<float>& poseKeypoints, cv::Mat& displayImage, std::vector<op::Point<int>>& imageSizes){
void poseFromHeatmap(const cv::Mat& inputImage, std::vector<boost::shared_ptr<caffe::Blob<float>>>& caffeNetOutputBlob, op::Array<float>& poseKeypoints, cv::Mat& displayImage, std::vector<op::Point<int>>& imageSizes) {
// Get Scale
const op::Point<int> inputDataSize{inputImage.cols, inputImage.rows};
const op::Point<int> inputDataSize{ inputImage.cols, inputImage.rows };
// Convert to Ptr
//std::vector<boost::shared_ptr<caffe::Blob<float>>> a;
......@@ -181,20 +187,20 @@ public:
const auto caffeNetOutputBlobs = caffeNetSharedToPtr(caffeNetOutputBlob);
// To be called once only
resizeAndMergeCaffe->Reshape(caffeNetOutputBlobs, {heatMapsBlob.get()},
op::getPoseNetDecreaseFactor(poseModel), 1.f/1.f, true,
resizeAndMergeCaffe->Reshape(caffeNetOutputBlobs, { heatMapsBlob.get() },
op::getPoseNetDecreaseFactor(poseModel), 1.f / 1.f, true,
0);
nmsCaffe->Reshape({heatMapsBlob.get()}, {peaksBlob.get()}, op::getPoseMaxPeaks(poseModel),
nmsCaffe->Reshape({ heatMapsBlob.get() }, { peaksBlob.get() }, op::getPoseMaxPeaks(poseModel),
op::getPoseNumberBodyParts(poseModel), 0);
bodyPartConnectorCaffe->Reshape({heatMapsBlob.get(), peaksBlob.get()});
bodyPartConnectorCaffe->Reshape({ heatMapsBlob.get(), peaksBlob.get() });
// Normal
op::OpOutputToCvMat opOutputToCvMat;
op::CvMatToOpInput cvMatToOpInput;
op::CvMatToOpOutput cvMatToOpOutput;
if(inputImage.empty())
if (inputImage.empty())
op::error("Could not open or find the image: ", __LINE__, __FUNCTION__, __FILE__);
const op::Point<int> imageSize{inputImage.cols, inputImage.rows};
const op::Point<int> imageSize{ inputImage.cols, inputImage.rows };
// Step 2 - Get desired scale sizes
std::vector<double> scaleInputToNetInputs;
std::vector<op::Point<int>> netInputSizes;
......@@ -209,27 +215,27 @@ public:
// Run the modes
const std::vector<float> floatScaleRatios(scaleInputToNetInputs.begin(), scaleInputToNetInputs.end());
resizeAndMergeCaffe->setScaleRatios(floatScaleRatios);
std::vector<caffe::Blob<float>*> heatMapsBlobs{heatMapsBlob.get()};
std::vector<caffe::Blob<float>*> peaksBlobs{peaksBlob.get()};
#ifdef USE_CUDA
std::vector<caffe::Blob<float>*> heatMapsBlobs{ heatMapsBlob.get() };
std::vector<caffe::Blob<float>*> peaksBlobs{ peaksBlob.get() };
#ifdef USE_CUDA
resizeAndMergeCaffe->Forward_gpu(caffeNetOutputBlobs, heatMapsBlobs); // ~5ms
#elif USE_OPENCL
#elif USE_OPENCL
resizeAndMergeCaffe->Forward_ocl(caffeNetOutputBlobs, heatMapsBlobs); // ~5ms
#else
#else
resizeAndMergeCaffe->Forward_cpu(caffeNetOutputBlobs, heatMapsBlobs); // ~5ms
#endif
#endif
nmsCaffe->setThreshold((float)poseExtractorCaffe->get(op::PoseProperty::NMSThreshold));
#ifdef USE_CUDA
#ifdef USE_CUDA
nmsCaffe->Forward_gpu(heatMapsBlobs, peaksBlobs);// ~2ms
#elif USE_OPENCL
#elif USE_OPENCL
nmsCaffe->Forward_ocl(heatMapsBlobs, peaksBlobs);// ~2ms
#else
#else
nmsCaffe->Forward_cpu(heatMapsBlobs, peaksBlobs);// ~2ms
#endif
#endif
op::cudaCheck(__LINE__, __FUNCTION__, __FILE__);
float mScaleNetToOutput = 1./scaleInputToNetInputs[0];
float mScaleNetToOutput = 1. / scaleInputToNetInputs[0];
bodyPartConnectorCaffe->setScaleNetToOutput(mScaleNetToOutput);
bodyPartConnectorCaffe->setInterMinAboveThreshold(
(float)poseExtractorCaffe->get(op::PoseProperty::ConnectInterMinAboveThreshold)
......@@ -238,8 +244,8 @@ public:
bodyPartConnectorCaffe->setMinSubsetCnt((int)poseExtractorCaffe->get(op::PoseProperty::ConnectMinSubsetCnt));
bodyPartConnectorCaffe->setMinSubsetScore((float)poseExtractorCaffe->get(op::PoseProperty::ConnectMinSubsetScore));
bodyPartConnectorCaffe->Forward_cpu({heatMapsBlob.get(),
peaksBlob.get()},
bodyPartConnectorCaffe->Forward_cpu({ heatMapsBlob.get(),
peaksBlob.get() },
mPoseKeypoints, mPoseScores);
poseKeypoints = mPoseKeypoints;
......@@ -255,10 +261,10 @@ public:
extern "C" {
#endif
typedef void* c_OP;
op::Array<float> output;
typedef void* c_OP;
op::Array<float> output;
c_OP newOP(int logging_level,
OP_EXPORT c_OP newOP(int logging_level,
char* output_resolution,
char* net_resolution,
char* model_pose,
......@@ -269,48 +275,48 @@ c_OP newOP(int logging_level,
int num_gpu_start,
bool disable_blending,
char* model_folder
){
) {
return new OpenPose(logging_level, output_resolution, net_resolution, model_pose, alpha_pose,
scale_gap, scale_number, render_threshold, num_gpu_start, disable_blending, model_folder);
}
void delOP(c_OP op){
}
OP_EXPORT void delOP(c_OP op) {
delete (OpenPose *)op;
}
void forward(c_OP op, unsigned char* img, size_t rows, size_t cols, int* size, unsigned char* displayImg, bool display){
}
OP_EXPORT void forward(c_OP op, unsigned char* img, size_t rows, size_t cols, int* size, unsigned char* displayImg, bool display) {
OpenPose* openPose = (OpenPose*)op;
cv::Mat image(rows, cols, CV_8UC3, img);
cv::Mat displayImage(rows, cols, CV_8UC3, displayImg);
openPose->forward(image, output, displayImage, display);
if(output.getSize().size()){
if (output.getSize().size()) {
size[0] = output.getSize()[0];
size[1] = output.getSize()[1];
size[2] = output.getSize()[2];
}else{
}
else {
size[0] = 0; size[1] = 0; size[2] = 0;
}
if(display) memcpy(displayImg, displayImage.ptr(), sizeof(unsigned char)*rows*cols*3);
}
void getOutputs(c_OP op, float* array){
if(output.getSize().size())
memcpy(array, output.getPtr(), output.getSize()[0]*output.getSize()[1]*output.getSize()[2]*sizeof(float));
}
void poseFromHeatmap(c_OP op, unsigned char* img, size_t rows, size_t cols, unsigned char* displayImg, float* hm, int* size, float* ratios){
if (display) memcpy(displayImg, displayImage.ptr(), sizeof(unsigned char)*rows*cols * 3);
}
OP_EXPORT void getOutputs(c_OP op, float* array) {
if (output.getSize().size())
memcpy(array, output.getPtr(), output.getSize()[0] * output.getSize()[1] * output.getSize()[2] * sizeof(float));
}
OP_EXPORT void poseFromHeatmap(c_OP op, unsigned char* img, size_t rows, size_t cols, unsigned char* displayImg, float* hm, int* size, float* ratios) {
OpenPose* openPose = (OpenPose*)op;
cv::Mat image(rows, cols, CV_8UC3, img);
cv::Mat displayImage(rows, cols, CV_8UC3, displayImg);
std::vector<boost::shared_ptr<caffe::Blob<float>>> caffeNetOutputBlob;
for(int i=0; i<size[0]; i++){
for (int i = 0; i<size[0]; i++) {
boost::shared_ptr<caffe::Blob<float>> caffeHmPtr(new caffe::Blob<float>());
caffeHmPtr->Reshape(1,size[1],size[2]*((float)ratios[i]/(float)ratios[0]),size[3]*((float)ratios[i]/(float)ratios[0]));
float* startIndex = &hm[i*size[1]*size[2]*size[3]];
for(int d=0; d<caffeHmPtr->shape()[1]; d++){
for(int r=0; r<caffeHmPtr->shape()[2]; r++){
for(int c=0; c<caffeHmPtr->shape()[3]; c++){
int toI = d*caffeHmPtr->shape()[2]*caffeHmPtr->shape()[3] + r*caffeHmPtr->shape()[3] + c;
int fromI = d*size[2]*size[3] + r*size[3] + c;
caffeHmPtr->Reshape(1, size[1], size[2] * ((float)ratios[i] / (float)ratios[0]), size[3] * ((float)ratios[i] / (float)ratios[0]));
float* startIndex = &hm[i*size[1] * size[2] * size[3]];
for (int d = 0; d<caffeHmPtr->shape()[1]; d++) {
for (int r = 0; r<caffeHmPtr->shape()[2]; r++) {
for (int c = 0; c<caffeHmPtr->shape()[3]; c++) {
int toI = d*caffeHmPtr->shape()[2] * caffeHmPtr->shape()[3] + r*caffeHmPtr->shape()[3] + c;
int fromI = d*size[2] * size[3] + r*size[3] + c;
caffeHmPtr->mutable_cpu_data()[toI] = startIndex[fromI];
}
}
......@@ -319,22 +325,23 @@ void poseFromHeatmap(c_OP op, unsigned char* img, size_t rows, size_t cols, unsi
}
std::vector<op::Point<int>> imageSizes;
for(int i=0; i<size[0]; i++){
for (int i = 0; i<size[0]; i++) {
op::Point<int> point(cols*ratios[i], rows*ratios[i]);
imageSizes.emplace_back(point);
}
openPose->poseFromHeatmap(image, caffeNetOutputBlob, output, displayImage, imageSizes);
memcpy(displayImg, displayImage.ptr(), sizeof(unsigned char)*rows*cols*3);
memcpy(displayImg, displayImage.ptr(), sizeof(unsigned char)*rows*cols * 3);
// Copy back kp size
if(output.getSize().size()){
if (output.getSize().size()) {
size[0] = output.getSize()[0];
size[1] = output.getSize()[1];
size[2] = output.getSize()[2];
}else{
}
else {
size[0] = 0; size[1] = 0; size[2] = 0;
}
}
}
#ifdef __cplusplus
}
......
......@@ -2,18 +2,27 @@
Wrap the OpenPose library with Python.
To install, run `make install`; the library will be stored in /usr/local/python
"""
import numpy as np
import ctypes as ct
import cv2
import os
from sys import platform
dir_path = os.path.dirname(os.path.realpath(__file__))
if platform == "win32":
os.environ['PATH'] = dir_path + "/../../lib;" + os.environ['PATH']
os.environ['PATH'] = dir_path + "/../../x64/Release;" + os.environ['PATH']
class OpenPose(object):
"""
Ctypes linkage
"""
if platform == "linux" or platform == "linux2":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/_openpose.so')
elif platform == "darwin":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/_openpose.dylib')
elif platform == "win32":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/Release/_openpose.dll')
_libop.newOP.argtypes = [
ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_float, ct.c_float, ct.c_int, ct.c_float, ct.c_int, ct.c_bool, ct.c_char_p]
_libop.newOP.restype = ct.c_void_p
......@@ -37,6 +46,9 @@ class OpenPose(object):
np.ctypeslib.ndpointer(dtype=np.float32), np.ctypeslib.ndpointer(dtype=np.int32), np.ctypeslib.ndpointer(dtype=np.float32)]
_libop.poseFromHeatmap.restype = None
def encode(self, string):
return ct.c_char_p(string.encode('utf-8'))
def __init__(self, params):
"""
OpenPose Constructor: Prepares OpenPose object
......@@ -50,16 +62,16 @@ class OpenPose(object):
outs: OpenPose object
"""
self.op = self._libop.newOP(params["logging_level"],
params["output_resolution"],
params["net_resolution"],
params["model_pose"],
self.encode(params["output_resolution"]),
self.encode(params["net_resolution"]),
self.encode(params["model_pose"]),
params["alpha_pose"],
params["scale_gap"],
params["scale_number"],
params["render_threshold"],
params["num_gpu_start"],
params["disable_blending"],
params["default_model_folder"])
self.encode(params["default_model_folder"]))
def __del__(self):
"""
......@@ -206,20 +218,20 @@ if __name__ == "__main__":
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x736"
params["model_pose"] = "COCO"
params["net_resolution"] = "-1x368"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 2
params["scale_number"] = 1
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = "models/"
params["default_model_folder"] = "../../../models/"
openpose = OpenPose(params)
img = cv2.imread("examples/media/COCO_val2014_000000000192.jpg")
img = cv2.imread("../../../examples/media/COCO_val2014_000000000192.jpg")
arr, output_image = openpose.forward(img, True)
print arr
print(arr)
while 1:
cv2.imshow("output", output_image)
......
......@@ -176,10 +176,10 @@ namespace op
cl::Buffer targetPtrBuffer = cl::Buffer((cl_mem)(targetPtr), true);
auto nmsRegisterKernel = OpenCL::getInstance(gpuID)->getKernelFunctorFromManager
<NMSRegisterKernelFunctor, T>(
"nmsRegisterKernel",nmsOclCommonFunctions + nmsRegisterKernel);
"nmsRegisterKernel", op::nmsOclCommonFunctions + op::nmsRegisterKernel);
auto nmsWriteKernel = OpenCL::getInstance(gpuID)->getKernelFunctorFromManager
<NMSWriteKernelFunctor, T>(
"nmsWriteKernel", nmsOclCommonFunctions + nmsWriteKernel);
"nmsWriteKernel", op::nmsOclCommonFunctions + op::nmsWriteKernel);
// log("num_b: " + std::to_string(bottom->shape(0))); // = 1
// log("channel_b: " + std::to_string(bottom->shape(1))); // = 57 = 18 body parts + bkg + 19x2 PAFs
......