Commit c8e38792 authored by R Raaj, committed by Gines

Python fix (#674)

* Bug fixes for Python Windows and Cmake
* Update to python doc
* Added BUILD_DLL
Parent 62eebd63
......@@ -235,9 +235,12 @@ option(DOWNLOAD_HAND_MODEL "Download hand model." ON)
option(BUILD_EXAMPLES "Build OpenPose examples." ON)
option(BUILD_DOCS "Build OpenPose documentation." OFF)
option(BUILD_PYTHON "Build OpenPose python." OFF)
if (WIN32)
option(BUILD_DLL "Copy all required DLL files into the same folder." ON)
endif ()
# Build as shared library
option(BUILD_SHARED_LIBS "Build as shared lib" ON)
option(BUILD_SHARED_LIBS "Build as shared lib." ON)
# Speed profiler
option(PROFILER_ENABLED "If enabled, OpenPose will be able to print out speed information at runtime." OFF)
......@@ -467,14 +470,18 @@ if (WIN32)
find_library(Caffe_Proto_LIB caffeproto HINTS ${FIND_LIB_PREFIX}/caffe/lib)
endif (${GPU_MODE} MATCHES "CPU_ONLY")
endif (${GPU_MODE} MATCHES "OPENCL")
if (${GPU_MODE} MATCHES "OPENCL")
unset(BOOST_SYSTEM_LIB_RELEASE CACHE)
unset(BOOST_SYSTEM_LIB_DEBUG CACHE)
find_library(BOOST_SYSTEM_LIB_RELEASE boost_system-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_SYSTEM_LIB_DEBUG boost_system-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
endif (${GPU_MODE} MATCHES "OPENCL")
# Boost dependencies
if (${GPU_MODE} MATCHES "CPU_ONLY" OR ${GPU_MODE} MATCHES "OPENCL" OR BUILD_PYTHON)
find_library(BOOST_SYSTEM_LIB_RELEASE libboost_system-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_SYSTEM_LIB_DEBUG libboost_system-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_FILESYSTEM_LIB_RELEASE libboost_filesystem-vc140-mt-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
find_library(BOOST_FILESYSTEM_LIB_DEBUG libboost_filesystem-vc140-mt-gd-1_61 HINTS ${FIND_LIB_PREFIX}/caffe3rdparty/lib)
else ()
set(BOOST_SYSTEM_LIB_RELEASE "")
set(BOOST_SYSTEM_LIB_DEBUG "")
set(BOOST_FILESYSTEM_LIB_RELEASE "")
set(BOOST_FILESYSTEM_LIB_DEBUG "")
endif ()
if (WITH_3D_RENDERER)
find_library(GLUT_LIBRARY freeglut HINTS ${FIND_LIB_PREFIX}/freeglut/lib)
message(STATUS "\${GLUT_LIBRARY} = ${GLUT_LIBRARY}")
......@@ -507,6 +514,32 @@ if (WIN32)
set(SPINNAKER_INCLUDE_DIRS "3rdparty/windows/spinnaker/include")
endif (WITH_FLIR_CAMERA)
set(Caffe_FOUND 1)
# BUILD_DLL must be enabled if BUILD_PYTHON is enabled
if (BUILD_PYTHON)
if (NOT BUILD_DLL)
message(FATAL_ERROR "BUILD_DLL must also be turned on to build the python library")
endif ()
endif ()
# Auto copy DLLs
if (BUILD_DLL)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
# Auto copy DLLs
if (${GPU_MODE} MATCHES "CUDA")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe/bin/*.dll")
elseif (${GPU_MODE} MATCHES "OPENCL")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_opencl/bin/*.dll")
elseif (${GPU_MODE} MATCHES "CPU_ONLY")
file(GLOB CAFFE_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe_cpu/bin/*.dll")
endif ()
file(GLOB OPENCV_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/opencv/x64/vc14/bin/*.dll")
file(GLOB OPENCV3PTY_DLL "${CMAKE_SOURCE_DIR}/3rdparty/windows/caffe3rdparty/lib/*.dll")
file(COPY ${CAFFE_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
file(COPY ${OPENCV_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
file(COPY ${OPENCV3PTY_DLL} DESTINATION ${CMAKE_BINARY_DIR}/lib)
endif ()
endif (WIN32)
......@@ -739,11 +772,16 @@ if (USE_MKL)
endif (USE_MKL)
if (${GPU_MODE} MATCHES "OPENCL")
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries} ${CMAKE_THREAD_LIBS_INIT} ${OpenCL_LIBRARIES})
if (WIN32)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_SYSTEM_LIB_DEBUG} optimized ${BOOST_SYSTEM_LIB_RELEASE})
endif (WIN32)
endif (${GPU_MODE} MATCHES "OPENCL")
# Boost
if (WIN32)
if (${GPU_MODE} MATCHES "CPU_ONLY" OR ${GPU_MODE} MATCHES "OPENCL" OR BUILD_PYTHON)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_SYSTEM_LIB_RELEASE} optimized ${BOOST_SYSTEM_LIB_RELEASE})
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
debug ${BOOST_FILESYSTEM_LIB_RELEASE} optimized ${BOOST_FILESYSTEM_LIB_RELEASE})
endif ()
endif (WIN32)
# 3-D
if (WITH_3D_ADAM_MODEL)
set(OpenPose_3rdparty_libraries ${OpenPose_3rdparty_libraries}
......
......@@ -9,14 +9,23 @@ OpenPose Python Module
## Introduction
This experimental module exposes a Python API for OpenPose. This allows you to construct an OpenPose object, pass in a numpy array for an image, and get a numpy array of the pose positions. This API also exposes an API that allows you to directly pass in heatmaps from a network and extract poses out of it.
This experimental module exposes a Python API for OpenPose. It allows you to construct an OpenPose object, pass in a numpy array for an image, and get back a numpy array of the pose positions. The API also lets you pass in heatmaps from a network directly and extract poses from them (this requires Python Caffe to be installed separately).
At present the Python API only supports body pose. Hands and Face will be added in the future.
## Installation
Check [doc/installation.md#python-module](./installation.md#python-module) for installation steps.
To simply test the OpenPose API in your project without installation, add the line `sys.path.append('{OpenPose_path}/python')` to your *.py files, where `{OpenPose_path}` points to your OpenPose build folder. Take a look at `build/examples/tutorial_pose/1_extract_pose.py` for an example.
On an Ubuntu or OSX based system, you can use the module globally. Running `sudo make install` installs OpenPose into `/usr/local/python` by default. Add this directory to your Python path and you can use the module from any location.
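As a quick sanity check, a minimal sketch based on the two paths described above (the `/usr/local/python` prefix is the default `make install` location; adjust it if you installed elsewhere, or use the build-folder option instead):

```python
# Minimal import sketch: make the OpenPose python module visible, then import it.
import sys

# Option a: point directly at the build folder (replace {OpenPose_path} with your build path)
# sys.path.append('{OpenPose_path}/python')

# Option b: default `sudo make install` location on Ubuntu/OSX
sys.path.append('/usr/local/python')

from openpose import *  # exposes the OpenPose wrapper class
```

See the full example under Testing below for constructing an `OpenPose` object and running inference.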
The Python API requires Numpy for array management, and OpenCV for image loading. They can be installed via:
```
pip install numpy
pip install opencv-python
```
## Compatibility
The OpenPose Python module is compatible with both Python 2 and Python 3. In addition, it will also run in all OpenPose compatible operating systems.
......@@ -26,56 +35,16 @@ The OpenPose Python module is compatible with both Python 2 and Python 3. In add
## Testing
Two examples can be found in `build/examples/tutorial_python` in your build folder. Navigate to this path to run the examples.
- `1_extract_pose` demonstrates a simple use of the API.
- `2_pose_from_heatmaps` demonstrates constructing pose from heatmaps from the caffe network.
- `1_extract_pose` demonstrates a simple use of the API.
- `2_pose_from_heatmaps` demonstrates constructing a pose from heatmaps produced by the Caffe network (requires Python Caffe to be installed separately).
```
# From command line
cd build/examples/tutorial_python
python
python 1_extract_pose.py
```
```python
# From Python
# It requires OpenCV installed for Python
import cv2
import os
import sys
# Remember to add your installation path here
# Option a
sys.path.append('{OpenPose_path}/python')
# Option b
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import *
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x368"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 1
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
# If GPU version is built, and multiple GPUs are available, set the ID here
params["disable_blending"] = False
params["default_model_folder"] = "/home/user/openpose/models"
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
while 1:
# Read new image
img = cv2.imread("image.png")
# Output keypoints and the image with the human skeleton blended on it
keypoints, output_image = openpose.forward(img, True)
# Print the human pose keypoints, i.e., a [#people x #keypoints x 3]-dimensional numpy object with the keypoints of all the people on that image
print keypoints
# Display the image
cv2.imshow("output", output_image)
cv2.waitKey(15)
```
## Code Sample
See `examples/tutorial_python/1_extract_pose.py`.
# From Python
# It requires OpenCV installed for Python
import sys
import cv2
import os
from sys import platform
# Remember to add your installation path here
# Option a
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append('../../python')
from openpose import *
if platform == "win32": sys.path.append(dir_path + '/../../python/openpose/');
else: sys.path.append('../../python');
# Option b
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled
from openpose import *
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x368"
params["model_pose"] = "COCO"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 1
params["render_threshold"] = 0.05
# If GPU version is built, and multiple GPUs are available, set the ID here
params["num_gpu_start"] = 0
params["disable_blending"] = False
# Ensure you point to the correct path where models are located
params["default_model_folder"] = dir_path + "/../../../models/"
# Construct OpenPose object allocates GPU memory
openpose = OpenPose(params)
img = cv2.imread(dir_path + "/../../../examples/media/COCO_val2014_000000000192.jpg")
arr, output_image = openpose.forward(img, True)
print arr
while 1:
# Read new image
img = cv2.imread("image.png")
# Output keypoints and the image with the human skeleton blended on it
keypoints, output_image = openpose.forward(img, True)
# Print the human pose keypoints, i.e., a [#people x #keypoints x 3]-dimensional numpy object with the keypoints of all the people on that image
print(keypoints)
# Display the image
cv2.imshow("output", output_image)
cv2.waitKey(15)
from sys import platform
import sys
try:
import caffe
except ImportError:
print("This sample can only be run if Python Caffe if available on your system")
print("Currently OpenPose does not compile Python Caffe. This may be supported in the future")
sys.exit(-1)
import os
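# Reduce Caffe/GLOG console output (minloglevel=1 hides INFO messages) before importing caffe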
os.environ["GLOG_minloglevel"] = "1"
import caffe
......@@ -36,7 +45,7 @@ caffe.set_device(0)
nets = []
for scale in scales:
nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print "Net loaded"
print("Net loaded")
# Test Function
first_run = True
......@@ -57,7 +66,7 @@ def func(frame):
net.reshape()
first_run = False
print "Reshaped"
print("Reshaped")
# Forward pass to get heatmaps
heatmaps = []
......
add_subdirectory(openpose)
add_subdirectory(openpose)
set(PYTHON_FILES
openpose.py
__init__.py
_openpose.cpp)
add_library(_openpose SHARED ${PYTHON_FILES})
target_link_libraries(_openpose openpose ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} ${Caffe_LIBS} ${MKL_LIBS} ${GLUT_LIBRARY} ${SPINNAKER_LIB} ${OpenCL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
SET_TARGET_PROPERTIES(_openpose PROPERTIES PREFIX "")
configure_file(openpose.py openpose.py)
configure_file(__init__.py __init__.py)
#install(TARGETS _openpose DESTINATION python)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ DESTINATION python/openpose FILES_MATCHING PATTERN "*.so")
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ DESTINATION python/openpose FILES_MATCHING PATTERN "*.py")
set(PYTHON_FILES
openpose.py
__init__.py
_openpose.cpp)
add_library(_openpose SHARED ${PYTHON_FILES})
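# Link against the core openpose target plus the shared list of third-party dependencies (OpenPose_3rdparty_libraries)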
target_link_libraries(_openpose openpose ${OpenPose_3rdparty_libraries})
SET_TARGET_PROPERTIES(_openpose PROPERTIES PREFIX "")
configure_file(openpose.py openpose.py)
configure_file(__init__.py __init__.py)
#install(TARGETS _openpose DESTINATION python)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ DESTINATION python/openpose FILES_MATCHING PATTERN "*.so")
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ DESTINATION python/openpose FILES_MATCHING PATTERN "*.py")
\ No newline at end of file
from openpose import *
from openpose import *
This diff is collapsed.
"""
Wrap the OpenPose library with Python.
To install run `make install` and library will be stored in /usr/local/python
"""
import numpy as np
import ctypes as ct
import cv2
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
class OpenPose(object):
"""
Ctypes linkage
"""
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/_openpose.so')
_libop.newOP.argtypes = [
ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_float, ct.c_float, ct.c_int, ct.c_float, ct.c_int, ct.c_bool, ct.c_char_p]
_libop.newOP.restype = ct.c_void_p
_libop.delOP.argtypes = [ct.c_void_p]
_libop.delOP.restype = None
_libop.forward.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.uint8),
ct.c_size_t, ct.c_size_t,
np.ctypeslib.ndpointer(dtype=np.int32), np.ctypeslib.ndpointer(dtype=np.uint8), ct.c_bool]
_libop.forward.restype = None
_libop.getOutputs.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.float32)]
_libop.getOutputs.restype = None
_libop.poseFromHeatmap.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.uint8),
ct.c_size_t, ct.c_size_t,
np.ctypeslib.ndpointer(dtype=np.uint8),
np.ctypeslib.ndpointer(dtype=np.float32), np.ctypeslib.ndpointer(dtype=np.int32), np.ctypeslib.ndpointer(dtype=np.float32)]
_libop.poseFromHeatmap.restype = None
def __init__(self, params):
"""
OpenPose Constructor: Prepares OpenPose object
Parameters
----------
params : dict of required parameters. refer to openpose example for more details
Returns
-------
outs: OpenPose object
"""
self.op = self._libop.newOP(params["logging_level"],
params["output_resolution"],
params["net_resolution"],
params["model_pose"],
params["alpha_pose"],
params["scale_gap"],
params["scale_number"],
params["render_threshold"],
params["num_gpu_start"],
params["disable_blending"],
params["default_model_folder"])
def __del__(self):
"""
OpenPose Destructor: Destroys OpenPose object
"""
self._libop.delOP(self.op)
def forward(self, image, display = False):
"""
Forward: Takes in an image and returns the human 2D poses, along with drawn image if required
Parameters
----------
image : color image of type ndarray
display : If set to true, we return both the pose and an annotated image for visualization
Returns
-------
array: ndarray of human 2D poses [People * BodyPart * XYConfidence]
displayImage : image for visualization
"""
shape = image.shape
displayImage = np.zeros(shape=(image.shape),dtype=np.uint8)
size = np.zeros(shape=(3),dtype=np.int32)
self._libop.forward(self.op, image, shape[0], shape[1], size, displayImage, display)
array = np.zeros(shape=(size),dtype=np.float32)
self._libop.getOutputs(self.op, array)
if display:
return array, displayImage
return array
def poseFromHM(self, image, hm, ratios=[1]):
"""
Pose From Heatmap: Takes in an image, computed heatmaps, and the required scales, and computes the pose
Parameters
----------
image : color image of type ndarray
hm : heatmap of type ndarray with heatmaps and part affinity fields
ratios : scaling ratios, if needed to fuse multiple scales
Returns
-------
array: ndarray of human 2D poses [People * BodyPart * XYConfidence]
displayImage : image for visualization
"""
if len(ratios) != len(hm):
raise Exception("Ratio shape mismatch")
# Find largest
hm_combine = np.zeros(shape=(len(hm), hm[0].shape[1], hm[0].shape[2], hm[0].shape[3]),dtype=np.float32)
i=0
for h in hm:
hm_combine[i,:,0:h.shape[2],0:h.shape[3]] = h
i+=1
hm = hm_combine
ratios = np.array(ratios,dtype=np.float32)
shape = image.shape
displayImage = np.zeros(shape=(image.shape),dtype=np.uint8)
size = np.zeros(shape=(4),dtype=np.int32)
size[0] = hm.shape[0]
size[1] = hm.shape[1]
size[2] = hm.shape[2]
size[3] = hm.shape[3]
self._libop.poseFromHeatmap(self.op, image, shape[0], shape[1], displayImage, hm, size, ratios)
array = np.zeros(shape=(size[0],size[1],size[2]),dtype=np.float32)
self._libop.getOutputs(self.op, array)
return array, displayImage
@staticmethod
def process_frames(frame, boxsize = 368, scales = [1]):
base_net_res = None
imagesForNet = []
imagesOrig = []
for idx, scale in enumerate(scales):
# Calculate net resolution (width, height)
if idx == 0:
net_res = (16 * int((boxsize * frame.shape[1] / float(frame.shape[0]) / 16) + 0.5), boxsize)
base_net_res = net_res
else:
net_res = ((min(base_net_res[0], max(1, int((base_net_res[0] * scale)+0.5)/16*16))),
(min(base_net_res[1], max(1, int((base_net_res[1] * scale)+0.5)/16*16))))
input_res = [frame.shape[1], frame.shape[0]]
scale_factor = min((net_res[0] - 1) / float(input_res[0] - 1), (net_res[1] - 1) / float(input_res[1] - 1))
warp_matrix = np.array([[scale_factor,0,0],
[0,scale_factor,0]])
if scale_factor != 1:
imageForNet = cv2.warpAffine(frame, warp_matrix, net_res, flags=(cv2.INTER_AREA if scale_factor < 1. else cv2.INTER_CUBIC), borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
else:
imageForNet = frame.copy()
imageOrig = imageForNet.copy()
imageForNet = imageForNet.astype(float)
imageForNet = imageForNet/256. - 0.5
imageForNet = np.transpose(imageForNet, (2,0,1))
imagesForNet.append(imageForNet)
imagesOrig.append(imageOrig)
return imagesForNet, imagesOrig
@staticmethod
def draw_all(imageForNet, heatmaps, currIndex, div=4., norm=False):
netDecreaseFactor = float(imageForNet.shape[0]) / float(heatmaps.shape[2]) # 8
resized_heatmaps = np.zeros(shape=(heatmaps.shape[0], heatmaps.shape[1], imageForNet.shape[0], imageForNet.shape[1]))
num_maps = heatmaps.shape[1]
combined = None
for i in range(0, num_maps):
heatmap = heatmaps[0,i,:,:]
resizedHeatmap = cv2.resize(heatmap, (0,0), fx=netDecreaseFactor, fy=netDecreaseFactor)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(resizedHeatmap)
if i==currIndex and currIndex >=0:
resizedHeatmap = np.abs(resizedHeatmap)
resizedHeatmap = (resizedHeatmap*255.).astype(dtype='uint8')
im_color = cv2.applyColorMap(resizedHeatmap, cv2.COLORMAP_JET)
resizedHeatmap = cv2.addWeighted(imageForNet, 1, im_color, 0.3, 0)
cv2.circle(resizedHeatmap, (int(maxLoc[0]),int(maxLoc[1])), 5, (255,0,0), -1)
return resizedHeatmap
else:
resizedHeatmap = np.abs(resizedHeatmap)
if combined is None:
combined = np.copy(resizedHeatmap);
else:
if i <= num_maps-2:
combined += resizedHeatmap;
if norm:
combined = np.maximum(0, np.minimum(1, combined));
if currIndex < 0:
combined /= div
combined = (combined*255.).astype(dtype='uint8')
im_color = cv2.applyColorMap(combined, cv2.COLORMAP_JET)
combined = cv2.addWeighted(imageForNet, 0.5, im_color, 0.5, 0)
cv2.circle(combined, (int(maxLoc[0]),int(maxLoc[1])), 5, (255,0,0), -1)
return combined
if __name__ == "__main__":
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x736"
params["model_pose"] = "COCO"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 2
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = "models/"
openpose = OpenPose(params)
img = cv2.imread("examples/media/COCO_val2014_000000000192.jpg")
arr, output_image = openpose.forward(img, True)
print arr
while 1:
cv2.imshow("output", output_image)
cv2.waitKey(15)
"""
Wrap the OpenPose library with Python.
To install run `make install` and library will be stored in /usr/local/python
"""
import numpy as np
import ctypes as ct
import cv2
import os
from sys import platform
dir_path = os.path.dirname(os.path.realpath(__file__))
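# On Windows, prepend the build's DLL folders to PATH so _openpose.dll and the DLLs copied by BUILD_DLL can be found at load time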
if platform == "win32":
os.environ['PATH'] = dir_path + "/../../lib;" + os.environ['PATH']
os.environ['PATH'] = dir_path + "/../../x64/Release;" + os.environ['PATH']
class OpenPose(object):
"""
Ctypes linkage
"""
if platform == "linux" or platform == "linux2":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/_openpose.so')
elif platform == "darwin":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/_openpose.dylib')
elif platform == "win32":
_libop= np.ctypeslib.load_library('_openpose', dir_path+'/Release/_openpose.dll')
_libop.newOP.argtypes = [
ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_char_p, ct.c_float, ct.c_float, ct.c_int, ct.c_float, ct.c_int, ct.c_bool, ct.c_char_p]
_libop.newOP.restype = ct.c_void_p
_libop.delOP.argtypes = [ct.c_void_p]
_libop.delOP.restype = None
_libop.forward.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.uint8),
ct.c_size_t, ct.c_size_t,
np.ctypeslib.ndpointer(dtype=np.int32), np.ctypeslib.ndpointer(dtype=np.uint8), ct.c_bool]
_libop.forward.restype = None
_libop.getOutputs.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.float32)]
_libop.getOutputs.restype = None
_libop.poseFromHeatmap.argtypes = [
ct.c_void_p, np.ctypeslib.ndpointer(dtype=np.uint8),
ct.c_size_t, ct.c_size_t,
np.ctypeslib.ndpointer(dtype=np.uint8),
np.ctypeslib.ndpointer(dtype=np.float32), np.ctypeslib.ndpointer(dtype=np.int32), np.ctypeslib.ndpointer(dtype=np.float32)]
_libop.poseFromHeatmap.restype = None
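# ctypes expects byte strings, so encode str parameters before passing them to the library (needed for Python 3)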
def encode(self, string):
return ct.c_char_p(string.encode('utf-8'))
def __init__(self, params):
"""
OpenPose Constructor: Prepares OpenPose object
Parameters
----------
params : dict of required parameters. refer to openpose example for more details
Returns
-------
outs: OpenPose object
"""
self.op = self._libop.newOP(params["logging_level"],
self.encode(params["output_resolution"]),
self.encode(params["net_resolution"]),
self.encode(params["model_pose"]),
params["alpha_pose"],
params["scale_gap"],
params["scale_number"],
params["render_threshold"],
params["num_gpu_start"],
params["disable_blending"],
self.encode(params["default_model_folder"]))
def __del__(self):
"""
OpenPose Destructor: Destroys OpenPose object
"""
self._libop.delOP(self.op)
def forward(self, image, display = False):
"""
Forward: Takes in an image and returns the human 2D poses, along with drawn image if required
Parameters
----------
image : color image of type ndarray
display : If set to true, we return both the pose and an annotated image for visualization
Returns
-------
array: ndarray of human 2D poses [People * BodyPart * XYConfidence]
displayImage : image for visualization
"""
shape = image.shape
displayImage = np.zeros(shape=(image.shape),dtype=np.uint8)
size = np.zeros(shape=(3),dtype=np.int32)
self._libop.forward(self.op, image, shape[0], shape[1], size, displayImage, display)
array = np.zeros(shape=(size),dtype=np.float32)
self._libop.getOutputs(self.op, array)
if display:
return array, displayImage
return array
def poseFromHM(self, image, hm, ratios=[1]):
"""
Pose From Heatmap: Takes in an image, computed heatmaps, and the required scales, and computes the pose
Parameters
----------
image : color image of type ndarray
hm : heatmap of type ndarray with heatmaps and part affinity fields
ratios : scaling ratios, if needed to fuse multiple scales
Returns
-------
array: ndarray of human 2D poses [People * BodyPart * XYConfidence]
displayImage : image for visualization
"""
if len(ratios) != len(hm):
raise Exception("Ratio shape mismatch")
# Find largest
hm_combine = np.zeros(shape=(len(hm), hm[0].shape[1], hm[0].shape[2], hm[0].shape[3]),dtype=np.float32)
i=0
for h in hm:
hm_combine[i,:,0:h.shape[2],0:h.shape[3]] = h
i+=1
hm = hm_combine
ratios = np.array(ratios,dtype=np.float32)
shape = image.shape
displayImage = np.zeros(shape=(image.shape),dtype=np.uint8)
size = np.zeros(shape=(4),dtype=np.int32)
size[0] = hm.shape[0]
size[1] = hm.shape[1]
size[2] = hm.shape[2]
size[3] = hm.shape[3]
self._libop.poseFromHeatmap(self.op, image, shape[0], shape[1], displayImage, hm, size, ratios)
array = np.zeros(shape=(size[0],size[1],size[2]),dtype=np.float32)
self._libop.getOutputs(self.op, array)
return array, displayImage
@staticmethod
def process_frames(frame, boxsize = 368, scales = [1]):
base_net_res = None
imagesForNet = []
imagesOrig = []
for idx, scale in enumerate(scales):
# Calculate net resolution (width, height)
if idx == 0:
net_res = (16 * int((boxsize * frame.shape[1] / float(frame.shape[0]) / 16) + 0.5), boxsize)
base_net_res = net_res
else:
net_res = ((min(base_net_res[0], max(1, int((base_net_res[0] * scale)+0.5)/16*16))),
(min(base_net_res[1], max(1, int((base_net_res[1] * scale)+0.5)/16*16))))
input_res = [frame.shape[1], frame.shape[0]]
scale_factor = min((net_res[0] - 1) / float(input_res[0] - 1), (net_res[1] - 1) / float(input_res[1] - 1))
warp_matrix = np.array([[scale_factor,0,0],
[0,scale_factor,0]])
if scale_factor != 1:
imageForNet = cv2.warpAffine(frame, warp_matrix, net_res, flags=(cv2.INTER_AREA if scale_factor < 1. else cv2.INTER_CUBIC), borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
else:
imageForNet = frame.copy()
imageOrig = imageForNet.copy()
imageForNet = imageForNet.astype(float)
imageForNet = imageForNet/256. - 0.5
imageForNet = np.transpose(imageForNet, (2,0,1))
imagesForNet.append(imageForNet)
imagesOrig.append(imageOrig)
return imagesForNet, imagesOrig
@staticmethod
def draw_all(imageForNet, heatmaps, currIndex, div=4., norm=False):
netDecreaseFactor = float(imageForNet.shape[0]) / float(heatmaps.shape[2]) # 8
resized_heatmaps = np.zeros(shape=(heatmaps.shape[0], heatmaps.shape[1], imageForNet.shape[0], imageForNet.shape[1]))
num_maps = heatmaps.shape[1]
combined = None
for i in range(0, num_maps):
heatmap = heatmaps[0,i,:,:]
resizedHeatmap = cv2.resize(heatmap, (0,0), fx=netDecreaseFactor, fy=netDecreaseFactor)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(resizedHeatmap)
if i==currIndex and currIndex >=0:
resizedHeatmap = np.abs(resizedHeatmap)
resizedHeatmap = (resizedHeatmap*255.).astype(dtype='uint8')
im_color = cv2.applyColorMap(resizedHeatmap, cv2.COLORMAP_JET)
resizedHeatmap = cv2.addWeighted(imageForNet, 1, im_color, 0.3, 0)
cv2.circle(resizedHeatmap, (int(maxLoc[0]),int(maxLoc[1])), 5, (255,0,0), -1)
return resizedHeatmap
else:
resizedHeatmap = np.abs(resizedHeatmap)
if combined is None:
combined = np.copy(resizedHeatmap);
else:
if i <= num_maps-2:
combined += resizedHeatmap;
if norm:
combined = np.maximum(0, np.minimum(1, combined));
if currIndex < 0:
combined /= div
combined = (combined*255.).astype(dtype='uint8')
im_color = cv2.applyColorMap(combined, cv2.COLORMAP_JET)
combined = cv2.addWeighted(imageForNet, 0.5, im_color, 0.5, 0)
cv2.circle(combined, (int(maxLoc[0]),int(maxLoc[1])), 5, (255,0,0), -1)
return combined
if __name__ == "__main__":
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x368"
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.3
params["scale_number"] = 1
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = "../../../models/"
openpose = OpenPose(params)
img = cv2.imread("../../../examples/media/COCO_val2014_000000000192.jpg")
arr, output_image = openpose.forward(img, True)
print(arr)
while 1:
cv2.imshow("output", output_image)
cv2.waitKey(15)
......@@ -176,10 +176,10 @@ namespace op
cl::Buffer targetPtrBuffer = cl::Buffer((cl_mem)(targetPtr), true);
auto nmsRegisterKernel = OpenCL::getInstance(gpuID)->getKernelFunctorFromManager
<NMSRegisterKernelFunctor, T>(
"nmsRegisterKernel",nmsOclCommonFunctions + nmsRegisterKernel);
"nmsRegisterKernel", op::nmsOclCommonFunctions + op::nmsRegisterKernel);
auto nmsWriteKernel = OpenCL::getInstance(gpuID)->getKernelFunctorFromManager
<NMSWriteKernelFunctor, T>(
"nmsWriteKernel", nmsOclCommonFunctions + nmsWriteKernel);
"nmsWriteKernel", op::nmsOclCommonFunctions + op::nmsWriteKernel);
// log("num_b: " + std::to_string(bottom->shape(0))); // = 1
// log("channel_b: " + std::to_string(bottom->shape(1))); // = 57 = 18 body parts + bkg + 19x2 PAFs
......