提交 0f3aedb2 编写于 作者: G gineshidalgo99

Added hands rectangle detection + wrapperstruct

上级 1adb1b15
......@@ -24,7 +24,7 @@ Each flag is divided into flag name, default value, and description.
- DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
- DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
3. OpenPose
- DEFINE_string(model_folder, "models/", "Folder where the pose models (COCO and MPI) are located.");
- DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
- DEFINE_string(resolution, "1280x720", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the default images resolution.");
- DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your machine.");
- DEFINE_int32(num_gpu_start, 0, "GPU device start number.");
......@@ -41,6 +41,8 @@ Each flag is divided into flag name, default value, and description.
- DEFINE_bool(face, false, "Enables face keypoint detection. It will share some parameters from the body pose, e.g. `model_folder`.");
- DEFINE_string(face_net_resolution, "368x368", "Multiples of 16. Analogous to `net_resolution` but applied to the face keypoint detector. 320x320 usually works fine while giving a substantial speed up when multiple faces on the image.");
6. OpenPose Hand
- DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g. `model_folder`.");
- DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16. Analogous to `net_resolution` but applied to the hand keypoint detector. 320x320 usually works fine while giving a substantial speed up when multiple hands on the image.");
7. OpenPose Rendering
- DEFINE_int32(part_to_show, 0, "Part to show from the start.");
- DEFINE_bool(disable_blending, false, "If blending is enabled, it will merge the results with the original frame. If disabled, it will only display the results.");
......@@ -52,16 +54,20 @@ Each flag is divided into flag name, default value, and description.
- DEFINE_int32(render_face, -1, "Analogous to `render_pose` but applied to the face. Extra option: -1 to use the same configuration that `render_pose` is using.");
- DEFINE_double(alpha_face, 0.6, "Analogous to `alpha_pose` but applied to face.");
- DEFINE_double(alpha_heatmap_face, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
10. Display
10. OpenPose Rendering Hand
- DEFINE_int32(render_hand, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same configuration that `render_pose` is using.");
- DEFINE_double(alpha_hand, 0.6, "Analogous to `alpha_pose` but applied to hand.");
- DEFINE_double(alpha_heatmap_hand, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
11. Display
- DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
- DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is too long, it will skip frames. If it is too fast, it will slow it down.");
- DEFINE_bool(no_gui_verbose, false, "Do not write text on output images on GUI (e.g. number of current frame and people). It does not affect the pose rendering.");
- DEFINE_bool(no_display, false, "Do not open a display window.");
11. Result Saving
12. Result Saving
- DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
- DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV function cv::imwrite for all compatible extensions.");
- DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the final path does not finish in `.avi`. It internally uses cv::VideoWriter.");
- DEFINE_string(write_keypoint, "", "Directory to write the people pose keypoint data. Format with `write_keypoint_format`.");
- DEFINE_string(write_keypoint, "", "Directory to write the people body pose keypoint data. Set format with `write_keypoint_format`.");
- DEFINE_string(write_keypoint_format, "yml", "File extension and format for `write_keypoint`: json, xml, yaml & yml. Json not available for OpenCV < 3.0, use `write_keypoint_json` instead.");
- DEFINE_string(write_keypoint_json, "", "Directory to write people pose data in *.json format, compatible with any OpenCV version.");
- DEFINE_string(write_coco_json, "", "Full file path to write people pose data with *.json COCO validation format.");
......
......@@ -64,7 +64,7 @@ DEFINE_bool(frame_flip, false, "Flip/mirror each frame
DEFINE_int32(frame_rotate, 0, "Rotate each frame, 4 possible values: 0, 90, 180, 270.");
DEFINE_bool(frames_repeat, false, "Repeat frames when finished.");
// OpenPose
DEFINE_string(model_folder, "models/", "Folder where the pose models (COCO and MPI) are located.");
DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
DEFINE_string(resolution, "1280x720", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
" default images resolution.");
DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
......@@ -99,6 +99,9 @@ DEFINE_string(face_net_resolution, "368x368", "Multiples of 16. Analog
" 320x320 usually works fine while giving a substantial speed up when multiple faces on the"
" image.");
// OpenPose Hand
DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
" `model_folder`.");
DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16. Analogous to `net_resolution` but applied to the hand keypoint detector.");
// OpenPose Rendering
DEFINE_int32(part_to_show, 0, "Part to show from the start.");
DEFINE_bool(disable_blending, false, "If blending is enabled, it will merge the results with the original frame. If disabled, it"
......@@ -117,6 +120,11 @@ DEFINE_int32(render_face, -1, "Analogous to `render_po
" configuration that `render_pose` is using.");
DEFINE_double(alpha_face, 0.6, "Analogous to `alpha_pose` but applied to face.");
DEFINE_double(alpha_heatmap_face, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
// OpenPose Rendering Hand
DEFINE_int32(render_hand, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
" configuration that `render_pose` is using.");
DEFINE_double(alpha_hand, 0.6, "Analogous to `alpha_pose` but applied to hand.");
DEFINE_double(alpha_heatmap_hand, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
// Display
DEFINE_bool(fullscreen, false, "Run in full-screen mode (press f during runtime to toggle).");
DEFINE_bool(process_real_time, false, "Enable to keep the original source frame rate (e.g. for video). If the processing time is"
......@@ -130,7 +138,7 @@ DEFINE_string(write_images_format, "png", "File extension and form
" function cv::imwrite for all compatible extensions.");
DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
" final path does not finish in `.avi`. It internally uses cv::VideoWriter.");
DEFINE_string(write_keypoint, "", "Directory to write the people pose keypoint data. Format with `write_keypoint_format`.");
DEFINE_string(write_keypoint, "", "Directory to write the people body pose keypoint data. Set format with `write_keypoint_format`.");
DEFINE_string(write_keypoint_format, "yml", "File extension and format for `write_keypoint`: json, xml, yaml & yml. Json not available"
" for OpenCV < 3.0, use `write_keypoint_json` instead.");
DEFINE_string(write_keypoint_json, "", "Directory to write people pose data in *.json format, compatible with any OpenCV version.");
......@@ -248,7 +256,7 @@ op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, std::shared_ptr<op::Producer>, op::PoseModel, op::ScaleMode,
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, std::shared_ptr<op::Producer>, op::PoseModel, op::ScaleMode,
std::vector<op::HeatMapType>> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
......@@ -272,6 +280,11 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, std::shared_ptr<op::P
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// producerType
const auto producerSharedPtr = gflagsToProducer(FLAGS_image_dir, FLAGS_video, FLAGS_camera, cameraFrameSize, FLAGS_camera_fps);
// poseModel
......@@ -281,7 +294,7 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, std::shared_ptr<op::P
// heatmaps to add
const auto heatMapTypes = gflagToHeatMaps(FLAGS_heatmaps_add_parts, FLAGS_heatmaps_add_bkg, FLAGS_heatmaps_add_PAFs);
// Return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, producerSharedPtr, poseModel, keypointScale, heatMapTypes);
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, producerSharedPtr, poseModel, keypointScale, heatMapTypes);
}
int opRealTimePoseDemo()
......@@ -298,11 +311,13 @@ int opRealTimePoseDemo()
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
std::shared_ptr<op::Producer> producerSharedPtr;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
std::tie(outputSize, netInputSize, faceNetInputSize, producerSharedPtr, poseModel, keypointScale, heatMapTypes) = gflagsToOpParameters();
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, producerSharedPtr, poseModel, keypointScale,
heatMapTypes) = gflagsToOpParameters();
// OpenPose wrapper
op::log("Configuring OpenPose wrapper.", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
......@@ -316,7 +331,8 @@ int opRealTimePoseDemo()
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::experimental::WrapperStructHand wrapperStructHand{false};
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose),
(float)FLAGS_alpha_hand, (float)FLAGS_alpha_heatmap_hand};
// Producer (use default to disable any input)
const op::WrapperStructInput wrapperStructInput{producerSharedPtr, FLAGS_frame_first, FLAGS_frame_last, FLAGS_process_real_time,
FLAGS_frame_flip, FLAGS_frame_rotate, FLAGS_frames_repeat};
......
......@@ -49,7 +49,7 @@ DEFINE_int32(logging_level, 3, "The logging level. Inte
// Producer
DEFINE_string(image_dir, "examples/media/", "Process a directory of images.");
// OpenPose
DEFINE_string(model_folder, "models/", "Folder where the pose models (COCO and MPI) are located.");
DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
DEFINE_string(resolution, "1280x720", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
" default images resolution.");
DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
......@@ -86,6 +86,9 @@ DEFINE_string(face_net_resolution, "368x368", "Multiples of 16. Analog
" 320x320 usually works fine while giving a substantial speed up when multiple faces on the"
" image.");
// OpenPose Hand
DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
" `model_folder`.");
DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16. Analogous to `net_resolution` but applied to the hand keypoint detector.");
// OpenPose Rendering
DEFINE_int32(part_to_show, 0, "Part to show from the start.");
DEFINE_bool(disable_blending, false, "If blending is enabled, it will merge the results with the original frame. If disabled, it"
......@@ -104,13 +107,18 @@ DEFINE_int32(render_face, -1, "Analogous to `render_po
" configuration that `render_pose` is using.");
DEFINE_double(alpha_face, 0.6, "Analogous to `alpha_pose` but applied to face.");
DEFINE_double(alpha_heatmap_face, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
// OpenPose Rendering Hand
DEFINE_int32(render_hand, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
" configuration that `render_pose` is using.");
DEFINE_double(alpha_hand, 0.6, "Analogous to `alpha_pose` but applied to hand.");
DEFINE_double(alpha_heatmap_hand, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
// Result Saving
DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
" function cv::imwrite for all compatible extensions.");
DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
" final path does not finish in `.avi`. It internally uses cv::VideoWriter.");
DEFINE_string(write_keypoint, "", "Directory to write the people pose keypoint data. Format with `write_keypoint_format`.");
DEFINE_string(write_keypoint, "", "Directory to write the people body pose keypoint data. Set format with `write_keypoint_format`.");
DEFINE_string(write_keypoint_format, "yml", "File extension and format for `write_keypoint`: json, xml, yaml & yml. Json not available"
" for OpenCV < 3.0, use `write_keypoint_json` instead.");
DEFINE_string(write_keypoint_json, "", "Directory to write people pose data in *.json format, compatible with any OpenCV version.");
......@@ -281,7 +289,7 @@ op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
op::ScaleMode> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
......@@ -300,6 +308,11 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::Sc
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
// keypointScale
......@@ -310,7 +323,7 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::Sc
const auto heatMapScale = (FLAGS_heatmaps_scale == 0 ? op::ScaleMode::PlusMinusOne
: (FLAGS_heatmaps_scale == 1 ? op::ScaleMode::ZeroToOne : op::ScaleMode::UnsignedChar ));
// return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
}
int openPoseTutorialWrapper1()
......@@ -327,18 +340,17 @@ int openPoseTutorialWrapper1()
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
op::ScaleMode heatMapScale;
std::tie(outputSize, netInputSize, faceNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Configure OpenPose
op::Wrapper<std::vector<UserDatum>> opWrapper{op::ThreadManagerMode::Asynchronous};
const bool displayGui = false;
const bool guiVerbose = false;
const bool fullScreen = false;
// Pose configuration (use WrapperStructPose{} for default and recommended configuration)
const op::WrapperStructPose wrapperStructPose{netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
FLAGS_num_scales, (float)FLAGS_scale_gap, gflagToRenderMode(FLAGS_render_pose), poseModel,
!FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
......@@ -347,8 +359,12 @@ int openPoseTutorialWrapper1()
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::experimental::WrapperStructHand wrapperStructHand{false};
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose),
(float)FLAGS_alpha_hand, (float)FLAGS_alpha_heatmap_hand};
// Consumer (comment or use default argument to disable any output)
const bool displayGui = false;
const bool guiVerbose = false;
const bool fullScreen = false;
const op::WrapperStructOutput wrapperStructOutput{displayGui, guiVerbose, fullScreen, FLAGS_write_keypoint,
op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_keypoint_json,
FLAGS_write_coco_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
......
......@@ -49,7 +49,7 @@ DEFINE_int32(logging_level, 3, "The logging level. Inte
// Producer
DEFINE_string(image_dir, "examples/media/", "Process a directory of images.");
// OpenPose
DEFINE_string(model_folder, "models/", "Folder where the pose models (COCO and MPI) are located.");
DEFINE_string(model_folder, "models/", "Folder path (absolute or relative) where the models (pose, face, ...) are located.");
DEFINE_string(resolution, "1280x720", "The image resolution (display and output). Use \"-1x-1\" to force the program to use the"
" default images resolution.");
DEFINE_int32(num_gpu, -1, "The number of GPU devices to use. If negative, it will use all the available GPUs in your"
......@@ -86,6 +86,9 @@ DEFINE_string(face_net_resolution, "368x368", "Multiples of 16. Analog
" 320x320 usually works fine while giving a substantial speed up when multiple faces on the"
" image.");
// OpenPose Hand
DEFINE_bool(hand, false, "Enables hand keypoint detection. It will share some parameters from the body pose, e.g."
" `model_folder`.");
DEFINE_string(hand_net_resolution, "368x368", "Multiples of 16. Analogous to `net_resolution` but applied to the hand keypoint detector.");
// OpenPose Rendering
DEFINE_int32(part_to_show, 0, "Part to show from the start.");
DEFINE_bool(disable_blending, false, "If blending is enabled, it will merge the results with the original frame. If disabled, it"
......@@ -104,13 +107,18 @@ DEFINE_int32(render_face, -1, "Analogous to `render_po
" configuration that `render_pose` is using.");
DEFINE_double(alpha_face, 0.6, "Analogous to `alpha_pose` but applied to face.");
DEFINE_double(alpha_heatmap_face, 0.7, "Analogous to `alpha_heatmap` but applied to face.");
// OpenPose Rendering Hand
DEFINE_int32(render_hand, -1, "Analogous to `render_pose` but applied to the hand. Extra option: -1 to use the same"
" configuration that `render_pose` is using.");
DEFINE_double(alpha_hand, 0.6, "Analogous to `alpha_pose` but applied to hand.");
DEFINE_double(alpha_heatmap_hand, 0.7, "Analogous to `alpha_heatmap` but applied to hand.");
// Result Saving
DEFINE_string(write_images, "", "Directory to write rendered frames in `write_images_format` image format.");
DEFINE_string(write_images_format, "png", "File extension and format for `write_images`, e.g. png, jpg or bmp. Check the OpenCV"
" function cv::imwrite for all compatible extensions.");
DEFINE_string(write_video, "", "Full file path to write rendered frames in motion JPEG video format. It might fail if the"
" final path does not finish in `.avi`. It internally uses cv::VideoWriter.");
DEFINE_string(write_keypoint, "", "Directory to write the people pose keypoint data. Format with `write_keypoint_format`.");
DEFINE_string(write_keypoint, "", "Directory to write the people body pose keypoint data. Set format with `write_keypoint_format`.");
DEFINE_string(write_keypoint_format, "yml", "File extension and format for `write_keypoint`: json, xml, yaml & yml. Json not available"
" for OpenCV < 3.0, use `write_keypoint_json` instead.");
DEFINE_string(write_keypoint_json, "", "Directory to write people pose data in *.json format, compatible with any OpenCV version.");
......@@ -326,7 +334,7 @@ op::RenderMode gflagToRenderMode(const int renderFlag, const int renderPoseFlag
}
// Google flags into program variables
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::ScaleMode, std::vector<op::HeatMapType>,
op::ScaleMode> gflagsToOpParameters()
{
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
......@@ -345,6 +353,11 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::Sc
nRead = sscanf(FLAGS_face_net_resolution.c_str(), "%dx%d", &faceNetInputSize.x, &faceNetInputSize.y);
op::checkE(nRead, 2, "Error, face net resolution format (" + FLAGS_face_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// handNetInputSize
op::Point<int> handNetInputSize;
nRead = sscanf(FLAGS_hand_net_resolution.c_str(), "%dx%d", &handNetInputSize.x, &handNetInputSize.y);
op::checkE(nRead, 2, "Error, hand net resolution format (" + FLAGS_hand_net_resolution
+ ") invalid, should be e.g., 368x368 (multiples of 16)", __LINE__, __FUNCTION__, __FILE__);
// poseModel
const auto poseModel = gflagToPoseModel(FLAGS_model_pose);
// keypointScale
......@@ -355,7 +368,7 @@ std::tuple<op::Point<int>, op::Point<int>, op::Point<int>, op::PoseModel, op::Sc
const auto heatMapScale = (FLAGS_heatmaps_scale == 0 ? op::ScaleMode::PlusMinusOne
: (FLAGS_heatmaps_scale == 1 ? op::ScaleMode::ZeroToOne : op::ScaleMode::UnsignedChar ));
// return
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
return std::make_tuple(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale);
}
int openPoseTutorialWrapper2()
......@@ -372,11 +385,12 @@ int openPoseTutorialWrapper2()
op::Point<int> outputSize;
op::Point<int> netInputSize;
op::Point<int> faceNetInputSize;
op::Point<int> handNetInputSize;
op::PoseModel poseModel;
op::ScaleMode keypointScale;
std::vector<op::HeatMapType> heatMapTypes;
op::ScaleMode heatMapScale;
std::tie(outputSize, netInputSize, faceNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
std::tie(outputSize, netInputSize, faceNetInputSize, handNetInputSize, poseModel, keypointScale, heatMapTypes, heatMapScale) = gflagsToOpParameters();
op::log("", op::Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Initializing the user custom classes
......@@ -398,9 +412,6 @@ int openPoseTutorialWrapper2()
const auto workerOutputOnNewThread = true;
opWrapper.setWorkerOutput(wUserOutput, workerOutputOnNewThread);
// Configure OpenPose
const bool displayGui = false;
const bool guiVerbose = false;
const bool fullScreen = false;
const op::WrapperStructPose wrapperStructPose{netInputSize, outputSize, keypointScale, FLAGS_num_gpu, FLAGS_num_gpu_start,
FLAGS_num_scales, (float)FLAGS_scale_gap, gflagToRenderMode(FLAGS_render_pose), poseModel,
!FLAGS_disable_blending, (float)FLAGS_alpha_pose, (float)FLAGS_alpha_heatmap,
......@@ -409,8 +420,12 @@ int openPoseTutorialWrapper2()
const op::WrapperStructFace wrapperStructFace{FLAGS_face, faceNetInputSize, gflagToRenderMode(FLAGS_render_face, FLAGS_render_pose),
(float)FLAGS_alpha_face, (float)FLAGS_alpha_heatmap_face};
// Hand configuration (use op::WrapperStructHand{} to disable it)
const op::experimental::WrapperStructHand wrapperStructHand{false};
const op::WrapperStructHand wrapperStructHand{FLAGS_hand, handNetInputSize, gflagToRenderMode(FLAGS_render_hand, FLAGS_render_pose),
(float)FLAGS_alpha_hand, (float)FLAGS_alpha_heatmap_hand};
// Consumer (comment or use default argument to disable any output)
const bool displayGui = false;
const bool guiVerbose = false;
const bool fullScreen = false;
const op::WrapperStructOutput wrapperStructOutput{displayGui, guiVerbose, fullScreen, FLAGS_write_keypoint,
op::stringToDataFormat(FLAGS_write_keypoint_format), FLAGS_write_keypoint_json,
FLAGS_write_coco_json, FLAGS_write_images, FLAGS_write_images_format, FLAGS_write_video,
......
......@@ -120,27 +120,27 @@ namespace op
// -------------------------------------------------- Basic Operators -------------------------------------------------- //
Point<T>& operator+=(const Point<T>& point);
Point<T> operator+(const Point<T>& point);
Point<T> operator+(const Point<T>& point) const;
Point<T>& operator+=(const T value);
Point<T> operator+(const T value);
Point<T> operator+(const T value) const;
Point<T>& operator-=(const Point<T>& point);
Point<T> operator-(const Point<T>& point);
Point<T> operator-(const Point<T>& point) const;
Point<T>& operator-=(const T value);
Point<T> operator-(const T value);
Point<T> operator-(const T value) const;
Point<T>& operator*=(const T value);
Point<T> operator*(const T value);
Point<T> operator*(const T value) const;
Point<T>& operator/=(const T value);
Point<T> operator/(const T value);
Point<T> operator/(const T value) const;
};
}
......
......@@ -64,11 +64,11 @@ namespace op
// -------------------------------------------------- Basic Operators -------------------------------------------------- //
Rectangle<T>& operator*=(const T value);
Rectangle<T> operator*(const T value);
Rectangle<T> operator*(const T value) const;
Rectangle<T>& operator/=(const T value);
Rectangle<T> operator/(const T value);
Rectangle<T> operator/(const T value) const;
};
}
......
#ifndef OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#define OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#include <array>
#include <atomic>
#include <memory> // std::shared_ptr
#include <thread>
#include <opencv2/core/core.hpp> // cv::Mat
#include <openpose/core/array.hpp>
#include <openpose/core/point.hpp>
#include <openpose/core/net.hpp>
#include <openpose/core/nmsCaffe.hpp>
#include <openpose/core/resizeAndMergeCaffe.hpp>
#include <openpose/pose/enumClasses.hpp>
#include "enumClasses.hpp"
namespace op
{
namespace experimental
{
// Hand keypoint extractor: runs a Caffe-based hand network (Net + resize/merge + NMS
// stages, see the sp* members) over an input frame and exposes the detected keypoints.
class HandExtractor
{
public:
// modelFolder: folder containing the model files; gpuId: GPU device to run on;
// poseModel: body pose model whose keypoint layout fixes the m*Wrist/m*Elbow/... indices below.
explicit HandExtractor(const std::string& modelFolder, const int gpuId, const PoseModel poseModel);
// Must be called from the thread that will later call forwardPass (see mThreadId/checkThread).
void initializationOnThread();
// Runs the hand network on cvInputData; poseKeypoints are the body keypoints,
// presumably used to locate hand regions from wrist/elbow positions -- TODO confirm.
void forwardPass(const Array<float>& poseKeypoints, const cv::Mat& cvInputData);
// Returns the hand keypoints produced by the last forwardPass call.
Array<float> getHandKeypoints() const;
// Thread-safe accessors for the tunable values in mProperties (std::atomic storage).
double get(const HandsProperty property) const;
void set(const HandsProperty property, const double value);
void increase(const HandsProperty property, const double value);
private:
const Point<int> mNetOutputSize;
const Point<int> mOutputSize;
// Body-model keypoint indices (values depend on the PoseModel given to the ctor);
// presumably used to derive the left/right hand crop locations -- TODO confirm.
const unsigned int mRWrist;
const unsigned int mRElbow;
const unsigned int mLWrist;
const unsigned int mLElbow;
const unsigned int mNeck;
const unsigned int mHeadNose;
// One atomic slot per HandsProperty, written via set()/increase(), read via get().
std::array<std::atomic<double>, (int)HandsProperty::Size> mProperties;
std::shared_ptr<Net> spNet;
std::shared_ptr<ResizeAndMergeCaffe<float>> spResizeAndMergeCaffe;
std::shared_ptr<NmsCaffe<float>> spNmsCaffe;
// Per-hand network input crops and the scale applied to each crop.
Array<float> mLeftHandCrop;
Array<float> mRightHandCrop;
Array<float> mHands;
float mScaleLeftHand;
float mScaleRightHand;
// Init with thread
// NOTE(review): boost::shared_ptr here while the blobs below use std::shared_ptr --
// Caffe's API hands out boost::shared_ptr, which would explain the mismatch; confirm
// against the .cpp before unifying the pointer types.
boost::shared_ptr<caffe::Blob<float>> spCaffeNetOutputBlob;
std::shared_ptr<caffe::Blob<float>> spHeatMapsBlob;
std::shared_ptr<caffe::Blob<float>> spPeaksBlob;
// Id of the thread that ran initializationOnThread; checkThread() presumably verifies
// that forwardPass runs on that same thread -- TODO confirm in the implementation.
std::thread::id mThreadId;
void checkThread() const;
DELETE_COPY(HandExtractor);
};
}
}
#endif // OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#ifndef OPENPOSE_HAND_HAND_RENDERER_HPP
#define OPENPOSE_HAND_HAND_RENDERER_HPP
#include <openpose/core/array.hpp>
#include <openpose/core/enumClasses.hpp>
#include <openpose/core/point.hpp>
#include <openpose/core/renderer.hpp>
#include <openpose/thread/worker.hpp>
#include "handParameters.hpp"
namespace op
{
namespace experimental
{
// Draws hand keypoints on top of an output frame; the CPU or GPU drawing path is
// selected by the RenderMode given at construction (see renderHandCpu/renderHandGpu).
class HandRenderer : public Renderer
{
public:
// frameSize: size of the frames to render on; alphaKeypoint/alphaHeatMap: blending
// factors (defaults come from handParameters.hpp); renderMode: CPU vs GPU rendering.
HandRenderer(const Point<int>& frameSize, const float alphaKeypoint = HAND_DEFAULT_ALPHA_KEYPOINT,
const float alphaHeatMap = HAND_DEFAULT_ALPHA_HEAT_MAP, const RenderMode renderMode = RenderMode::Cpu);
// Non-default dtor -- presumably releases pGpuHands; confirm in the implementation.
~HandRenderer();
void initializationOnThread();
// Renders handKeypoints onto outputData, dispatching to the CPU or GPU variant below.
void renderHand(Array<float>& outputData, const Array<float>& handKeypoints);
private:
const Point<int> mFrameSize;
const RenderMode mRenderMode;
float* pGpuHands; // GPU aux memory
void renderHandCpu(Array<float>& outputData, const Array<float>& handKeypoints);
void renderHandGpu(Array<float>& outputData, const Array<float>& handKeypoints);
DELETE_COPY(HandRenderer);
};
}
}
#endif // OPENPOSE_HAND_HAND_RENDERER_HPP
#ifndef OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#define OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#include <memory> // std::shared_ptr
#include <openpose/thread/worker.hpp>
// NOTE(review): this extractor worker includes handRenderer.hpp but uses HandExtractor --
// presumably HandExtractor arrives transitively; consider including handExtractor.hpp directly.
#include "handRenderer.hpp"
namespace op
{
namespace experimental
{
// Pipeline worker that extracts hand keypoints for every datum flowing through the
// thread queue, delegating to a shared HandExtractor instance.
template<typename TDatums>
class WHandExtractor : public Worker<TDatums>
{
public:
explicit WHandExtractor(const std::shared_ptr<HandExtractor>& handExtractor);
// Initializes the wrapped HandExtractor on this worker's thread.
void initializationOnThread();
// Fills tDatum.handKeypoints for each datum in tDatums (see implementation below).
void work(TDatums& tDatums);
private:
std::shared_ptr<HandExtractor> spHandExtractor;
DELETE_COPY(WHandExtractor);
};
}
}
// Implementation
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/macros.hpp>
#include <openpose/utilities/pointerContainer.hpp>
#include <openpose/utilities/profiler.hpp>
namespace op
{
namespace experimental
{
template<typename TDatums>
WHandExtractor<TDatums>::WHandExtractor(const std::shared_ptr<HandExtractor>& handExtractor) :
spHandExtractor{handExtractor}
{
}
template<typename TDatums>
void WHandExtractor<TDatums>::initializationOnThread()
{
spHandExtractor->initializationOnThread();
}
template<typename TDatums>
void WHandExtractor<TDatums>::work(TDatums& tDatums)
{
try
{
// Skip null/empty datum containers (e.g. end-of-stream markers).
if (checkNoNullNorEmpty(tDatums))
{
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Extract people hands
for (auto& tDatum : *tDatums)
{
// Run the hand network using the body keypoints and the original input frame,
// then store the result on the datum for downstream workers (e.g. the renderer).
spHandExtractor->forwardPass(tDatum.poseKeypoints, tDatum.cvInputData);
tDatum.handKeypoints = spHandExtractor->getHandKeypoints();
}
// Profiling speed
Profiler::timerEnd(profilerKey);
Profiler::printAveragedTimeMsOnIterationX(profilerKey, __LINE__, __FUNCTION__, __FILE__, Profiler::DEFAULT_X);
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
}
catch (const std::exception& e)
{
// On failure: stop this worker, null the datums so downstream stages see no work,
// and report the error through the project's error channel.
this->stop();
tDatums = nullptr;
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
COMPILE_TEMPLATE_DATUM(WHandExtractor);
}
}
#endif // OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#ifndef OPENPOSE_EXPERIMENTAL_HAND_W_HAND_RENDERER_HPP
#define OPENPOSE_EXPERIMENTAL_HAND_W_HAND_RENDERER_HPP
#include <memory> // std::shared_ptr
#include <openpose/thread/worker.hpp>
#include "handRenderer.hpp"
namespace op
{
namespace experimental
{
/**
 * WHandRenderer: queue worker wrapping a HandRenderer. It overlays the extracted hand
 * keypoints of every datum onto the datum's output frame.
 * NOTE(review): include guard renamed to OPENPOSE_EXPERIMENTAL_... — the previous guard
 * (OPENPOSE_HAND_W_HAND_RENDERER_HPP) collided with the non-experimental
 * wHandRenderer.hpp, silently disabling whichever header was included second.
 */
template<typename TDatums>
class WHandRenderer : public Worker<TDatums>
{
public:
// handRenderer: shared renderer instance (may be shared between workers).
explicit WHandRenderer(const std::shared_ptr<HandRenderer>& handRenderer);
// Thread-local setup for the renderer — must run on the worker thread.
void initializationOnThread();
// Renders the hands of every datum in tDatums; on failure stops the worker,
// invalidates the packet and propagates via error().
void work(TDatums& tDatums);
private:
std::shared_ptr<HandRenderer> spHandRenderer;
DELETE_COPY(WHandRenderer);
};
}
}
// Implementation
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/macros.hpp>
#include <openpose/utilities/pointerContainer.hpp>
#include <openpose/utilities/profiler.hpp>
namespace op
{
namespace experimental
{
template<typename TDatums>
WHandRenderer<TDatums>::WHandRenderer(const std::shared_ptr<HandRenderer>& handRenderer) :
spHandRenderer{handRenderer}
{
}
template<typename TDatums>
void WHandRenderer<TDatums>::initializationOnThread()
{
spHandRenderer->initializationOnThread();
}
template<typename TDatums>
void WHandRenderer<TDatums>::work(TDatums& tDatums)
{
try
{
if (checkNoNullNorEmpty(tDatums))
{
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Render people hands
for (auto& tDatum : *tDatums)
spHandRenderer->renderHand(tDatum.outputData, tDatum.handKeypoints);
// Profiling speed
Profiler::timerEnd(profilerKey);
Profiler::printAveragedTimeMsOnIterationX(profilerKey, __LINE__, __FUNCTION__, __FILE__, Profiler::DEFAULT_X);
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
}
catch (const std::exception& e)
{
// Stop this worker, invalidate the shared packet so downstream workers skip it, and rethrow.
this->stop();
tDatums = nullptr;
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
COMPILE_TEMPLATE_DATUM(WHandRenderer);
}
}
#endif // OPENPOSE_EXPERIMENTAL_HAND_W_HAND_RENDERER_HPP
#ifndef OPENPOSE_EXPERIMENTAL_HEADERS_HPP
#define OPENPOSE_EXPERIMENTAL_HEADERS_HPP
// hands module
#include "hand/headers.hpp"
// producer module
// #include "producer/headers.hpp"
......
......@@ -15,7 +15,7 @@ namespace op
public:
explicit FaceDetector(const PoseModel poseModel);
std::vector<Rectangle<float>> detectFaces(const Array<float>& poseKeypoints, const float scaleInputToOutput);
std::vector<Rectangle<float>> detectFaces(const Array<float>& poseKeypoints, const float scaleInputToOutput) const;
private:
const unsigned int mNeck;
......
......@@ -3,7 +3,7 @@
namespace op
{
enum class HandsProperty : bool
enum class HandProperty : bool
{
NMSThreshold = 0,
Size,
......
#ifndef OPENPOSE_HAND_HAND_DETECTOR_HPP
#define OPENPOSE_HAND_HAND_DETECTOR_HPP
#include <array>
#include <string> // std::string (getPoseKeypoints signature) — was relied on transitively
#include <vector>
#include <openpose/core/array.hpp>
#include <openpose/core/point.hpp>
#include <openpose/core/rectangle.hpp>
#include <openpose/pose/enumClasses.hpp>
#include <openpose/utilities/macros.hpp>
#include "enumClasses.hpp"
namespace op
{
/**
 * HandDetector: estimates left/right hand bounding rectangles from body pose keypoints
 * (wrist/elbow/shoulder positions), optionally refined over time by a simple tracker.
 */
class HandDetector
{
public:
// poseModel selects which body part indexes (wrist/elbow/shoulder) to read.
explicit HandDetector(const PoseModel poseModel);
// Returns, per person, a {left, right} pair of hand rectangles derived from the pose.
std::vector<std::array<Rectangle<float>, 2>> detectHands(const Array<float>& poseKeypoints, const float scaleInputToOutput) const;
// Like detectHands, but also uses the internal track state (non-const).
std::vector<std::array<Rectangle<float>, 2>> trackHands(const Array<float>& poseKeypoints, const float scaleInputToOutput);
// Refreshes the tracker state from the latest pose and hand keypoints.
void updateTracker(const Array<float>& poseKeypoints, const Array<float>& handKeypoints);
private:
// Body parts the detector relies on; values index into mPoseIndexes.
enum class PosePart : unsigned int
{
LWrist = 0,
LElbow,
LShoulder,
RWrist,
RElbow,
RShoulder,
Size,
};
const std::array<unsigned int, (int)PosePart::Size> mPoseIndexes; // model-specific keypoint indexes
std::vector<std::array<Point<float>, (int)PosePart::Size>> mPoseTrack; // per-person tracked pose parts
std::vector<Rectangle<float>> mHandTrack; // per-person tracked hand rectangles
// Maps the PosePart names to the keypoint indexes of the given pose model.
std::array<unsigned int, (int)PosePart::Size> getPoseKeypoints(const PoseModel poseModel,
const std::array<std::string, (int)PosePart::Size>& poseStrings);
DELETE_COPY(HandDetector);
};
}
#endif // OPENPOSE_HAND_HAND_DETECTOR_HPP
#ifndef OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#define OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#include <array>
#include <atomic>
#include <memory> // std::shared_ptr
#include <string> // std::string (modelFolder) — was relied on transitively
#include <thread>
#include <opencv2/core/core.hpp> // cv::Mat
#include <openpose/core/array.hpp>
#include <openpose/core/point.hpp>
#include <openpose/core/net.hpp>
#include <openpose/core/nmsCaffe.hpp>
#include <openpose/core/rectangle.hpp>
#include <openpose/core/resizeAndMergeCaffe.hpp>
#include <openpose/utilities/macros.hpp>
#include "enumClasses.hpp"
namespace op
{
/**
 * HandExtractor: runs the hand keypoint CNN on image crops given by per-person
 * {left, right} hand rectangles, producing hand keypoints retrievable via
 * getHandKeypoints(). Thread-affine: forwardPass must run on the thread that
 * called initializationOnThread (see checkThread).
 */
class HandExtractor
{
public:
// netInputSize/netOutputSize: CNN resolutions; modelFolder: path to the caffe models; gpuId: CUDA device.
explicit HandExtractor(const Point<int>& netInputSize, const Point<int>& netOutputSize, const std::string& modelFolder, const int gpuId);
// Thread-local setup (net + blobs); records the caller's thread id for checkThread().
void initializationOnThread();
// Extracts hand keypoints for every {left, right} rectangle pair over cvInputData.
void forwardPass(const std::vector<std::array<Rectangle<float>, 2>> handRectangles, const cv::Mat& cvInputData, const float scaleInputToOutput);
// Returns the keypoints produced by the last forwardPass.
Array<float> getHandKeypoints() const;
// Thread-safe access to runtime-tunable properties (see HandProperty).
double get(const HandProperty property) const;
void set(const HandProperty property, const double value);
void increase(const HandProperty property, const double value);
private:
const Point<int> mNetOutputSize;
std::array<std::atomic<double>, (int)HandProperty::Size> mProperties; // atomics: set/get may race with forwardPass
std::shared_ptr<Net> spNet;
std::shared_ptr<ResizeAndMergeCaffe<float>> spResizeAndMergeCaffe;
std::shared_ptr<NmsCaffe<float>> spNmsCaffe;
Array<float> mHandImageCrop;
Array<float> mHandKeypoints;
// Init with thread
// boost::shared_ptr here (not std::) — presumably because the Caffe API hands out
// boost::shared_ptr blobs; confirm against NetCaffe::getOutputBlob.
boost::shared_ptr<caffe::Blob<float>> spCaffeNetOutputBlob;
std::shared_ptr<caffe::Blob<float>> spHeatMapsBlob;
std::shared_ptr<caffe::Blob<float>> spPeaksBlob;
std::thread::id mThreadId; // thread that ran initializationOnThread
// Errors out if called from a thread other than mThreadId.
void checkThread() const;
DELETE_COPY(HandExtractor);
};
}
#endif // OPENPOSE_HAND_HAND_EXTRACTOR_HPP
#ifndef OPENPOSE_HAND_HAND_RENDERER_HPP
#define OPENPOSE_HAND_HAND_RENDERER_HPP
#include <openpose/core/array.hpp>
#include <openpose/core/enumClasses.hpp>
#include <openpose/core/point.hpp>
#include <openpose/core/renderer.hpp>
#include <openpose/thread/worker.hpp>
#include "handParameters.hpp"
namespace op
{
/**
 * HandRenderer: draws previously extracted hand keypoints on top of an output frame.
 * Rendering runs on CPU or GPU according to the RenderMode chosen at construction
 * (private renderHandCpu/renderHandGpu back ends).
 */
class HandRenderer : public Renderer
{
public:
// frameSize: output frame resolution; alpha values: blending factors in [0, 1].
HandRenderer(const Point<int>& frameSize, const float alphaKeypoint = HAND_DEFAULT_ALPHA_KEYPOINT,
const float alphaHeatMap = HAND_DEFAULT_ALPHA_HEAT_MAP, const RenderMode renderMode = RenderMode::Cpu);
// Non-default destructor — presumably releases pGpuHand; confirm against the cpp.
~HandRenderer();
// Thread-local setup (GPU-side resources) — must run on the rendering thread.
void initializationOnThread();
// Blends handKeypoints onto outputData.
void renderHand(Array<float>& outputData, const Array<float>& handKeypoints);
private:
const Point<int> mFrameSize;
const RenderMode mRenderMode;
float* pGpuHand; // GPU aux memory
void renderHandCpu(Array<float>& outputData, const Array<float>& handKeypoints);
void renderHandGpu(Array<float>& outputData, const Array<float>& handKeypoints);
DELETE_COPY(HandRenderer);
};
}
#endif // OPENPOSE_HAND_HAND_RENDERER_HPP
......@@ -3,10 +3,12 @@
// hand module
#include "enumClasses.hpp"
#include "handDetector.hpp"
#include "handExtractor.hpp"
#include "handParameters.hpp"
#include "handRenderer.hpp"
#include "renderHand.hpp"
#include "wHandDetector.hpp"
#include "wHandExtractor.hpp"
#include "wHandRenderer.hpp"
......
#ifndef OPENPOSE_HAND_W_HAND_DETECTOR_HPP
#define OPENPOSE_HAND_W_HAND_DETECTOR_HPP
#include <memory> // std::shared_ptr
#include <openpose/thread/worker.hpp>
// Fixed: this worker wraps HandDetector, so it must include its header.
// It previously included "handRenderer.hpp", which does not declare HandDetector.
#include "handDetector.hpp"
namespace op
{
/**
 * WHandDetector: queue worker wrapping a HandDetector. For each datum it derives the
 * per-person {left, right} hand rectangles from the body pose keypoints and stores
 * them in tDatum.handRectangles for the downstream hand keypoint extractor.
 */
template<typename TDatums>
class WHandDetector : public Worker<TDatums>
{
public:
// handDetector: shared detector instance (may be shared between workers).
explicit WHandDetector(const std::shared_ptr<HandDetector>& handDetector);
// HandDetector keeps no thread-local state, so there is nothing to initialize.
void initializationOnThread();
// Fills handRectangles for every datum; on failure stops the worker, invalidates
// the packet and propagates via error().
void work(TDatums& tDatums);
private:
std::shared_ptr<HandDetector> spHandDetector;
DELETE_COPY(WHandDetector);
};
}
// Implementation
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/macros.hpp>
#include <openpose/utilities/pointerContainer.hpp>
#include <openpose/utilities/profiler.hpp>
namespace op
{
template<typename TDatums>
WHandDetector<TDatums>::WHandDetector(const std::shared_ptr<HandDetector>& handDetector) :
spHandDetector{handDetector}
{
}
template<typename TDatums>
void WHandDetector<TDatums>::initializationOnThread()
{
}
template<typename TDatums>
void WHandDetector<TDatums>::work(TDatums& tDatums)
{
try
{
if (checkNoNullNorEmpty(tDatums))
{
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Detect people hand
for (auto& tDatum : *tDatums)
tDatum.handRectangles = spHandDetector->detectHands(tDatum.poseKeypoints, tDatum.scaleInputToOutput);
// Profiling speed
Profiler::timerEnd(profilerKey);
Profiler::printAveragedTimeMsOnIterationX(profilerKey, __LINE__, __FUNCTION__, __FILE__, Profiler::DEFAULT_X);
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
}
catch (const std::exception& e)
{
// Stop this worker, invalidate the shared packet so downstream workers skip it, and rethrow.
this->stop();
tDatums = nullptr;
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
COMPILE_TEMPLATE_DATUM(WHandDetector);
}
#endif // OPENPOSE_HAND_W_HAND_DETECTOR_HPP
#ifndef OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#define OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#include <memory> // std::shared_ptr
#include <openpose/thread/worker.hpp>
// Fixed: this worker wraps HandExtractor, so it must include its header.
// It previously included "handRenderer.hpp", which does not declare HandExtractor.
#include "handExtractor.hpp"
namespace op
{
/**
 * WHandExtractor: queue worker wrapping a HandExtractor. For each datum it runs the
 * hand keypoint CNN over the hand rectangles produced by WHandDetector and stores
 * the resulting keypoints in tDatum.handKeypoints.
 */
template<typename TDatums>
class WHandExtractor : public Worker<TDatums>
{
public:
// handExtractor: shared hand keypoint extractor (may be shared between workers).
explicit WHandExtractor(const std::shared_ptr<HandExtractor>& handExtractor);
// Thread-local setup (e.g. Caffe net initialization) — must run on the worker thread.
void initializationOnThread();
// Processes every datum in tDatums; on failure stops the worker, invalidates the
// packet and propagates via error().
void work(TDatums& tDatums);
private:
std::shared_ptr<HandExtractor> spHandExtractor;
DELETE_COPY(WHandExtractor);
};
}
// Implementation
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/macros.hpp>
#include <openpose/utilities/pointerContainer.hpp>
#include <openpose/utilities/profiler.hpp>
namespace op
{
template<typename TDatums>
WHandExtractor<TDatums>::WHandExtractor(const std::shared_ptr<HandExtractor>& handExtractor) :
spHandExtractor{handExtractor}
{
}
template<typename TDatums>
void WHandExtractor<TDatums>::initializationOnThread()
{
spHandExtractor->initializationOnThread();
}
template<typename TDatums>
void WHandExtractor<TDatums>::work(TDatums& tDatums)
{
try
{
if (checkNoNullNorEmpty(tDatums))
{
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Profiling speed
const auto profilerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Extract people hands
for (auto& tDatum : *tDatums)
{
spHandExtractor->forwardPass(tDatum.handRectangles, tDatum.cvInputData, tDatum.scaleInputToOutput);
tDatum.handKeypoints = spHandExtractor->getHandKeypoints();
}
// Profiling speed
Profiler::timerEnd(profilerKey);
Profiler::printAveragedTimeMsOnIterationX(profilerKey, __LINE__, __FUNCTION__, __FILE__, Profiler::DEFAULT_X);
// Debugging log
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
}
catch (const std::exception& e)
{
// Stop this worker, invalidate the shared packet so downstream workers skip it, and rethrow.
this->stop();
tDatums = nullptr;
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
COMPILE_TEMPLATE_DATUM(WHandExtractor);
}
#endif // OPENPOSE_HAND_W_HAND_EXTRACTOR_HPP
#ifndef OPENPOSE_HAND_W_HAND_RENDERER_HPP
#define OPENPOSE_HAND_W_HAND_RENDERER_HPP
#include <memory> // std::shared_ptr
#include <openpose/thread/worker.hpp>
#include "handRenderer.hpp"
namespace op
{
/**
 * WHandRenderer: queue worker that overlays the extracted hand keypoints of every
 * datum onto its output frame, delegating the actual drawing to a HandRenderer.
 */
template<typename TDatums>
class WHandRenderer : public Worker<TDatums>
{
public:
explicit WHandRenderer(const std::shared_ptr<HandRenderer>& handRenderer);
void initializationOnThread();
void work(TDatums& tDatums);
private:
std::shared_ptr<HandRenderer> spHandRenderer;
DELETE_COPY(WHandRenderer);
};
}
// Implementation
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/macros.hpp>
#include <openpose/utilities/pointerContainer.hpp>
#include <openpose/utilities/profiler.hpp>
namespace op
{
template<typename TDatums>
WHandRenderer<TDatums>::WHandRenderer(const std::shared_ptr<HandRenderer>& handRenderer) :
spHandRenderer{handRenderer}
{
}
template<typename TDatums>
void WHandRenderer<TDatums>::initializationOnThread()
{
// Forward the thread-local setup (e.g. GPU resources) to the wrapped renderer.
spHandRenderer->initializationOnThread();
}
template<typename TDatums>
void WHandRenderer<TDatums>::work(TDatums& tDatums)
{
try
{
// Null or empty packets require no work.
if (!checkNoNullNorEmpty(tDatums))
return;
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
const auto timerKey = Profiler::timerInit(__LINE__, __FUNCTION__, __FILE__);
// Draw the hand keypoints of each datum onto its output frame.
for (auto& datum : *tDatums)
spHandRenderer->renderHand(datum.outputData, datum.handKeypoints);
Profiler::timerEnd(timerKey);
Profiler::printAveragedTimeMsOnIterationX(timerKey, __LINE__, __FUNCTION__, __FILE__, Profiler::DEFAULT_X);
dLog("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
catch (const std::exception& e)
{
// Halt this worker, invalidate the shared packet for downstream workers, and rethrow.
this->stop();
tDatums = nullptr;
error(e.what(), __LINE__, __FUNCTION__, __FILE__);
}
}
COMPILE_TEMPLATE_DATUM(WHandRenderer);
}
#endif // OPENPOSE_HAND_W_HAND_RENDERER_HPP
......@@ -83,7 +83,7 @@ namespace op
// Similar to the previous configure, but it also includes hand extraction and rendering
void configure(const WrapperStructPose& wrapperStructPose,
// Hand (use the default WrapperStructHand{} to disable any hand detector)
const experimental::WrapperStructHand& wrapperStructHand,
const WrapperStructHand& wrapperStructHand,
// Producer (set producerSharedPtr = nullptr or use the default WrapperStructInput{} to disable any input)
const WrapperStructInput& wrapperStructInput,
// Consumer (keep default values to disable any output)
......@@ -103,7 +103,7 @@ namespace op
// Face (use the default WrapperStructFace{} to disable any face detector)
const WrapperStructFace& wrapperStructFace = WrapperStructFace{},
// Hand (use the default WrapperStructHand{} to disable any hand detector)
const experimental::WrapperStructHand& wrapperStructHand = experimental::WrapperStructHand{},
const WrapperStructHand& wrapperStructHand = WrapperStructHand{},
// Producer (set producerSharedPtr = nullptr or use the default WrapperStructInput{} to disable any input)
const WrapperStructInput& wrapperStructInput = WrapperStructInput{},
// Consumer (keep default values to disable any output)
......@@ -260,10 +260,10 @@ namespace op
// Implementation
#include <openpose/core/headers.hpp>
#include <openpose/experimental/headers.hpp>
#include <openpose/face/headers.hpp>
#include <openpose/filestream/headers.hpp>
#include <openpose/gui/headers.hpp>
#include <openpose/hand/headers.hpp>
#include <openpose/pose/headers.hpp>
#include <openpose/producer/headers.hpp>
#include <openpose/utilities/cuda.hpp>
......@@ -375,7 +375,7 @@ namespace op
{
try
{
configure(wrapperStructPose, WrapperStructFace{}, experimental::WrapperStructHand{},
configure(wrapperStructPose, WrapperStructFace{}, WrapperStructHand{},
wrapperStructInput, wrapperStructOutput);
}
catch (const std::exception& e)
......@@ -392,7 +392,7 @@ namespace op
{
try
{
configure(wrapperStructPose, wrapperStructFace, experimental::WrapperStructHand{},
configure(wrapperStructPose, wrapperStructFace, WrapperStructHand{},
wrapperStructInput, wrapperStructOutput);
}
catch (const std::exception& e)
......@@ -403,7 +403,7 @@ namespace op
template<typename TDatums, typename TWorker, typename TQueue>
void Wrapper<TDatums, TWorker, TQueue>::configure(const WrapperStructPose& wrapperStructPose,
const experimental::WrapperStructHand& wrapperStructHand,
const WrapperStructHand& wrapperStructHand,
const WrapperStructInput& wrapperStructInput,
const WrapperStructOutput& wrapperStructOutput)
{
......@@ -421,7 +421,7 @@ namespace op
template<typename TDatums, typename TWorker, typename TQueue>
void Wrapper<TDatums, TWorker, TQueue>::configure(const WrapperStructPose& wrapperStructPose,
const WrapperStructFace& wrapperStructFace,
const experimental::WrapperStructHand& wrapperStructHand,
const WrapperStructHand& wrapperStructHand,
const WrapperStructInput& wrapperStructInput,
const WrapperStructOutput& wrapperStructOutput)
{
......@@ -433,13 +433,18 @@ namespace op
typedef std::shared_ptr<TDatums> TDatumsPtr;
// Required parameters
const auto renderOutput = wrapperStructPose.renderMode != RenderMode::None || wrapperStructFace.renderMode != RenderMode::None;
const auto renderOutputGpu = wrapperStructPose.renderMode == RenderMode::Gpu || wrapperStructFace.renderMode == RenderMode::Gpu;
const auto renderOutput = wrapperStructPose.renderMode != RenderMode::None || wrapperStructFace.renderMode != RenderMode::None
|| wrapperStructHand.renderMode != RenderMode::None;
const auto renderOutputGpu = wrapperStructPose.renderMode == RenderMode::Gpu || wrapperStructFace.renderMode == RenderMode::Gpu
|| wrapperStructHand.renderMode == RenderMode::Gpu;
const auto renderFace = wrapperStructFace.enable && wrapperStructFace.renderMode != RenderMode::None;
const auto renderHand = wrapperStructHand.enable && wrapperStructHand.renderMode != RenderMode::None;
const auto renderHandGpu = wrapperStructHand.enable && wrapperStructHand.renderMode == RenderMode::Gpu;
// Check no wrong/contradictory flags enabled
if (wrapperStructPose.alphaKeypoint < 0. || wrapperStructPose.alphaKeypoint > 1.
|| wrapperStructFace.alphaHeatMap < 0. || wrapperStructFace.alphaHeatMap > 1.)
|| wrapperStructFace.alphaHeatMap < 0. || wrapperStructFace.alphaHeatMap > 1.
|| wrapperStructHand.alphaHeatMap < 0. || wrapperStructHand.alphaHeatMap > 1.)
error("Alpha value for blending must be in the range [0,1].", __LINE__, __FUNCTION__, __FILE__);
if (wrapperStructPose.scaleGap <= 0.f && wrapperStructPose.scalesNumber > 1)
error("The scale gap must be greater than 0 (it has no effect if the number of scales is 1).", __LINE__, __FUNCTION__, __FILE__);
......@@ -575,7 +580,7 @@ namespace op
std::vector<TWorker> cpuRenderers;
if (renderOutputGpu || wrapperStructPose.renderMode == RenderMode::Cpu)
{
// If !wrapperStructPose.renderMode == RenderMode::Gpu but renderOutput, then we create an alpha = 0 pose renderer
// If wrapperStructPose.renderMode != RenderMode::Gpu but renderOutput, then we create an alpha = 0 pose renderer
// in order to keep the removing background option
const auto alphaKeypoint = (wrapperStructPose.renderMode != RenderMode::None ? wrapperStructPose.alphaKeypoint : 0.f);
const auto alphaHeatMap = (wrapperStructPose.renderMode != RenderMode::None ? wrapperStructPose.alphaHeatMap : 0.f);
......@@ -601,8 +606,8 @@ namespace op
);
cpuRenderers.emplace_back(std::make_shared<WPoseRenderer<TDatumsPtr>>(poseCpuRenderer));
}
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
}
log("", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
// Input cvMat to OpenPose format
const auto cvMatToOpInput = std::make_shared<CvMatToOpInput>(
......@@ -635,19 +640,24 @@ namespace op
}
// Hand extractor(s)
if (wrapperStructHand.extractAndRenderHands)
if (wrapperStructHand.enable)
{
for (auto gpuId = 0; gpuId < spWPoses.size(); gpuId++)
{
const auto handExtractor = std::make_shared<experimental::HandExtractor>(
wrapperStructPose.modelFolder, gpuId + gpuNumberStart, wrapperStructPose.poseModel
// Hand detector
const auto handDetector = std::make_shared<HandDetector>(wrapperStructPose.poseModel);
spWPoses.at(gpuId).emplace_back(std::make_shared<WHandDetector<TDatumsPtr>>(handDetector));
// Hand keypoint extractor
const auto netOutputSize = wrapperStructHand.netInputSize;
const auto handExtractor = std::make_shared<HandExtractor>(
wrapperStructHand.netInputSize, netOutputSize, wrapperStructPose.modelFolder, gpuId + gpuNumberStart
);
spWPoses.at(gpuId).emplace_back(std::make_shared<experimental::WHandExtractor<TDatumsPtr>>(handExtractor));
spWPoses.at(gpuId).emplace_back(std::make_shared<WHandExtractor<TDatumsPtr>>(handExtractor));
}
}
// Pose renderer(s)
if (renderOutputGpu && !poseRenderers.empty())
if (!poseRenderers.empty())
for (auto i = 0; i < spWPoses.size(); i++)
spWPoses.at(i).emplace_back(std::make_shared<WPoseRenderer<TDatumsPtr>>(poseRenderers.at(i)));
......@@ -676,7 +686,7 @@ namespace op
// Performance boost -> share spGpuMemoryPtr for all renderers
if (!poseRenderers.empty())
{
const bool isLastRenderer = (!wrapperStructHand.extractAndRenderHands);
const bool isLastRenderer = !renderHandGpu;
faceRenderer->setSharedParametersAndIfLast(poseRenderers.at(i)->getSharedParameters(), isLastRenderer);
}
// Add worker
......@@ -688,35 +698,36 @@ namespace op
}
// Hands renderer(s)
if (wrapperStructHand.extractAndRenderHands)
if (renderHand)
{
// CPU rendering
// if (wrapperStructHand.renderMode == RenderMode::Cpu)
if (wrapperStructHand.renderMode == RenderMode::Cpu)
{
// Construct hand renderer
const auto handRenderer = std::make_shared<experimental::HandRenderer>(finalOutputSize);
const auto handRenderer = std::make_shared<HandRenderer>(finalOutputSize);
// Add worker
cpuRenderers.emplace_back(std::make_shared<experimental::WHandRenderer<TDatumsPtr>>(handRenderer));
cpuRenderers.emplace_back(std::make_shared<WHandRenderer<TDatumsPtr>>(handRenderer));
}
// GPU rendering
// else if (wrapperStructHand.renderMode == RenderMode::Gpu)
// {
// for (auto i = 0; i < spWPoses.size(); i++)
// {
// // Construct hands renderer
// const auto handRenderer = std::make_shared<experimental::HandRenderer>(finalOutputSize);
// // Performance boost -> share spGpuMemoryPtr for all renderers
// if (!poseRenderers.empty())
// {
// const bool isLastRenderer = true;
// handRenderer->setSharedParametersAndIfLast(poseRenderers.at(i)->getSharedParameters(), isLastRenderer);
// }
// // Add worker
// spWPoses.at(i).emplace_back(std::make_shared<experimental::WHandRenderer<TDatumsPtr>>(handRenderer));
// }
// }
// else
// error("Unknown RenderMode.", __LINE__, __FUNCTION__, __FILE__);
else if (wrapperStructHand.renderMode == RenderMode::Gpu)
{
for (auto i = 0; i < spWPoses.size(); i++)
{
// Construct hands renderer
const auto handRenderer = std::make_shared<HandRenderer>(finalOutputSize, wrapperStructFace.alphaKeypoint,
wrapperStructFace.alphaHeatMap);
// Performance boost -> share spGpuMemoryPtr for all renderers
if (!poseRenderers.empty())
{
const bool isLastRenderer = true;
handRenderer->setSharedParametersAndIfLast(poseRenderers.at(i)->getSharedParameters(), isLastRenderer);
}
// Add worker
spWPoses.at(i).emplace_back(std::make_shared<WHandRenderer<TDatumsPtr>>(handRenderer));
}
}
else
error("Unknown RenderMode.", __LINE__, __FUNCTION__, __FILE__);
}
// Itermediate workers (e.g. OpenPose format to cv::Mat, json & frames recorder, ...)
......@@ -748,7 +759,7 @@ namespace op
mOutputWs.emplace_back(std::make_shared<WPoseSaver<TDatumsPtr>>(keypointSaver));
if (wrapperStructFace.enable)
mOutputWs.emplace_back(std::make_shared<WFaceSaver<TDatumsPtr>>(keypointSaver));
if (wrapperStructHand.extractAndRenderHands)
if (wrapperStructHand.enable)
mOutputWs.emplace_back(std::make_shared<WHandSaver<TDatumsPtr>>(keypointSaver));
}
// Write people pose data on disk (json format)
......@@ -758,7 +769,7 @@ namespace op
mOutputWs.emplace_back(std::make_shared<WPoseJsonSaver<TDatumsPtr>>(keypointJsonSaver));
if (wrapperStructFace.enable)
mOutputWs.emplace_back(std::make_shared<WFaceJsonSaver<TDatumsPtr>>(keypointJsonSaver));
if (wrapperStructHand.extractAndRenderHands)
if (wrapperStructHand.enable)
mOutputWs.emplace_back(std::make_shared<WHandJsonSaver<TDatumsPtr>>(keypointJsonSaver));
}
// Write people pose data on disk (COCO validation json format)
......
......@@ -15,8 +15,7 @@ namespace op
struct WrapperStructFace
{
/**
* PROVISIONAL PARAMETER. IT WILL BE CHANGED.
* Whether to extract and render face.
* Whether to extract face.
*/
bool enable;
......
#ifndef OPENPOSE_WRAPPER_WRAPPER_STRUCT_HAND_HPP
#define OPENPOSE_WRAPPER_WRAPPER_STRUCT_HAND_HPP
#include <openpose/core/enumClasses.hpp>
#include <openpose/core/point.hpp>
#include <openpose/hand/handParameters.hpp>
namespace op
{
/**
 * WrapperStructHand: Hand estimation and rendering configuration struct.
 * WrapperStructHand allows the user to set up the hand estimation and rendering parameters that will be used for the OpenPose Wrapper
 * class.
 */
// Fixed: the previous text interleaved the removed experimental version with the new one,
// leaving a dangling `namespace experimental` and a duplicated struct declaration that
// could not compile. This is the coherent non-experimental version.
struct WrapperStructHand
{
/**
 * Whether to extract hand.
 */
bool enable;
/**
 * CCN (Conv Net) input size.
 * The greater, the slower and more memory it will be needed, but it will potentially increase accuracy.
 * Both width and height must be divisible by 16.
 */
Point<int> netInputSize;
/**
 * Whether to render the output (pose locations, body, background or PAF heat maps) with CPU or GPU.
 * Select `None` for no rendering, `Cpu` or `Gpu` for CPU and GPU rendering respectively.
 */
RenderMode renderMode;
/**
 * Rendering blending alpha value of the pose point locations with respect to the background image.
 * Value in the range [0, 1]. 0 will only render the background, 1 will fully render the pose.
 */
float alphaKeypoint;
/**
 * Rendering blending alpha value of the heat maps (hand part, background or PAF) with respect to the background image.
 * Value in the range [0, 1]. 0 will only render the background, 1 will only render the heat map.
 */
float alphaHeatMap;
/**
 * Constructor of the struct.
 * It has the recommended and default values we recommend for each element of the struct.
 * Since all the elements of the struct are public, they can also be manually filled.
 */
WrapperStructHand(const bool enable = false, const Point<int>& netInputSize = Point<int>{368, 368},
const RenderMode renderMode = RenderMode::None,
const float alphaKeypoint = HAND_DEFAULT_ALPHA_KEYPOINT,
const float alphaHeatMap = HAND_DEFAULT_ALPHA_HEAT_MAP);
};
}
#endif // OPENPOSE_WRAPPER_WRAPPER_STRUCT_HAND_HPP
......@@ -45,17 +45,17 @@ namespace op
}
template <typename T>
__global__ void writeResultKernel(T* output, const int length, const int* const input, const T* const sourcePtr, const int width, const int maxPeaks)
__global__ void writeResultKernel(T* output, const int length, const int* const kernelPtr, const T* const sourcePtr, const int width, const int height, const int maxPeaks)
{
__shared__ int local[THREADS_PER_BLOCK+1]; // one more
const auto globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (globalIdx < length)
{
local[threadIdx.x] = input[globalIdx];
local[threadIdx.x] = kernelPtr[globalIdx];
//last thread in the block but not globally last, load one more
if (threadIdx.x == THREADS_PER_BLOCK - 1 && globalIdx != length - 1)
local[threadIdx.x+1] = input[globalIdx+1];
local[threadIdx.x+1] = kernelPtr[globalIdx+1];
__syncthreads();
// see difference, except the globally last one
......@@ -63,10 +63,10 @@ namespace op
{
if (local[threadIdx.x] != local[threadIdx.x + 1])
{
//means A[globalIdx] == A[globalIdx + 1] as the input[globalIdx]-th repeat
const auto peakIndex = input[globalIdx]; //0-index
const auto peakLocX = globalIdx % width;
const auto peakLocY = globalIdx / width;
//means A[globalIdx] == A[globalIdx + 1] as the kernelPtr[globalIdx]-th repeat
const auto peakIndex = kernelPtr[globalIdx]; //0-index
const auto peakLocX = (int)(globalIdx % width);
const auto peakLocY = (int)(globalIdx / width);
if (peakIndex < maxPeaks) // limitation
{
......@@ -75,16 +75,16 @@ namespace op
T scoreAcc = 0.f;
for (auto dy = -3 ; dy < 4 ; dy++)
{
if (0 < (peakLocY+dy) && (peakLocY+dy) < width)
const auto y = peakLocY + dy;
if (0 <= y && y < height) // 368
{
for (auto dx = -3 ; dx < 4 ; dx++)
{
if (0 < (peakLocX+dx) && (peakLocX+dx) < width)
const auto x = peakLocX + dx;
if (0 <= x && x < width) // 656
{
const auto score = sourcePtr[(peakLocY+dy)*width + peakLocX+dx];
const auto x = peakLocX+dx;
const auto y = peakLocY+dy;
if (score>0)
const auto score = sourcePtr[y * width + x];
if (score > 0)
{
xAcc += x*score;
yAcc += y*score;
......@@ -95,15 +95,15 @@ namespace op
}
}
const auto output_index = (peakIndex + 1) * 3;
output[output_index] = xAcc / scoreAcc;
output[output_index + 1] = yAcc / scoreAcc;
output[output_index + 2] = sourcePtr[peakLocY*width + peakLocX];
const auto outputIndex = (peakIndex + 1) * 3;
output[outputIndex] = xAcc / scoreAcc;
output[outputIndex + 1] = yAcc / scoreAcc;
output[outputIndex + 2] = sourcePtr[peakLocY*width + peakLocX];
}
}
}
else
output[0] = input[globalIdx]; //number of peaks
output[0] = kernelPtr[globalIdx]; //number of peaks
}
}
......@@ -166,7 +166,7 @@ namespace op
thrust::exclusive_scan(kernelThrustPtr, kernelThrustPtr + imageOffset, kernelThrustPtr); //[0,0,0,0,0,1,1,1,1,1,2,2,2,2]
// This returns targetPtrOffsetted, with the NMS applied over it
writeResultKernel<<<numBlocks1D, threadsPerBlock1D>>>(targetPtrOffsetted, imageOffset, kernelPtrOffsetted, sourcePtrOffsetted, width, maxPeaks);
writeResultKernel<<<numBlocks1D, threadsPerBlock1D>>>(targetPtrOffsetted, imageOffset, kernelPtrOffsetted, sourcePtrOffsetted, width, height, maxPeaks);
}
}
cudaCheck(__LINE__, __FUNCTION__, __FILE__);
......
......@@ -91,7 +91,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator+(const Point<T>& point)
Point<T> Point<T>::operator+(const Point<T>& point) const
{
try
{
......@@ -122,7 +122,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator+(const T value)
Point<T> Point<T>::operator+(const T value) const
{
try
{
......@@ -153,7 +153,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator-(const Point<T>& point)
Point<T> Point<T>::operator-(const Point<T>& point) const
{
try
{
......@@ -184,7 +184,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator-(const T value)
Point<T> Point<T>::operator-(const T value) const
{
try
{
......@@ -215,7 +215,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator*(const T value)
Point<T> Point<T>::operator*(const T value) const
{
try
{
......@@ -246,7 +246,7 @@ namespace op
}
template<typename T>
Point<T> Point<T>::operator/(const T value)
Point<T> Point<T>::operator/(const T value) const
{
try
{
......
......@@ -131,7 +131,7 @@ namespace op
}
template<typename T>
Rectangle<T> Rectangle<T>::operator*(const T value)
Rectangle<T> Rectangle<T>::operator*(const T value) const
{
try
{
......@@ -164,7 +164,7 @@ namespace op
}
template<typename T>
Rectangle<T> Rectangle<T>::operator/(const T value)
Rectangle<T> Rectangle<T>::operator/(const T value) const
{
try
{
......
#include <openpose/experimental/hand/headers.hpp>
namespace op
{
// Explicitly instantiates the experimental hand worker templates for the default
// datum container type, so their definitions are emitted in this translation unit.
DEFINE_TEMPLATE_DATUM(experimental::WHandExtractor);
DEFINE_TEMPLATE_DATUM(experimental::WHandRenderer);
}
#include <opencv2/opencv.hpp> // CV_WARP_INVERSE_MAP, CV_INTER_LINEAR
#include <openpose/core/netCaffe.hpp>
#include <openpose/experimental/hand/handParameters.hpp>
#include <openpose/pose/poseParameters.hpp>
#include <openpose/utilities/cuda.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/fastMath.hpp>
#include <openpose/utilities/openCv.hpp>
#include <openpose/experimental/hand/handExtractor.hpp>
// #include <openpose/experimental/hand/handRenderGpu.hpp> // For commented debugging section
namespace op
{
namespace experimental
{
// Experimental hand keypoint extractor. NOT functional yet: the constructor
// body aborts via error() below. Sets up the Caffe net and the body-part
// indexes later used to locate hands from body pose keypoints.
HandExtractor::HandExtractor(const std::string& modelFolder, const int gpuId, const PoseModel poseModel) :
    mNetOutputSize{368, 368},
    mOutputSize{1280, 720},
    // Body-part indexes (model-dependent) used to estimate hand locations.
    mRWrist{poseBodyPartMapStringToKey(poseModel, "RWrist")},
    mRElbow{poseBodyPartMapStringToKey(poseModel, "RElbow")},
    mLWrist{poseBodyPartMapStringToKey(poseModel, "LWrist")},
    mLElbow{poseBodyPartMapStringToKey(poseModel, "LElbow")},
    mNeck{poseBodyPartMapStringToKey(poseModel, "Neck")},
    // Some pose models name this part "Nose", others "Head"; the lookup takes both candidates.
    mHeadNose{poseBodyPartMapStringToKey(poseModel, std::vector<std::string>{"Nose", "Head"})},
    // Net input shape {2, 3, h, w}: batch of 2 RGB crops (presumably one per hand -- TODO confirm).
    spNet{std::make_shared<NetCaffe>(std::array<int,4>{2, 3, mNetOutputSize.y, mNetOutputSize.x}, modelFolder + HAND_PROTOTXT, modelFolder + HAND_TRAINED_MODEL, gpuId)},
    spResizeAndMergeCaffe{std::make_shared<ResizeAndMergeCaffe<float>>()},
    spNmsCaffe{std::make_shared<NmsCaffe<float>>()},
    // One RGB crop buffer per hand (width * height * 3 channels).
    mLeftHandCrop{mNetOutputSize.area()*3},
    mRightHandCrop{mLeftHandCrop.getSize()},
    mScaleLeftHand{100.f},
    mScaleRightHand{mScaleLeftHand}
{
    try
    {
        // Feature is not released yet; fail fast on construction.
        error("Hands extraction is not implemented yet. COMING SOON!", __LINE__, __FUNCTION__, __FILE__);
        // Properties
        for (auto& property : mProperties)
            property = 0.;
        mProperties[(int)HandsProperty::NMSThreshold] = HAND_DEFAULT_NMS_THRESHOLD;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Per-thread setup. Must run on the worker thread that will later call
// forwardPass()/getHandKeypoints(): the Caffe/CUDA buffers created here are
// bound to that thread (enforced by checkThread()).
void HandExtractor::initializationOnThread()
{
    try
    {
        log("Starting initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Get thread id
        mThreadId = {std::this_thread::get_id()};
        // Caffe net
        spNet->initializationOnThread();
        spCaffeNetOutputBlob = ((NetCaffe*)spNet.get())->getOutputBlob();
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // HeatMaps extractor blob and layer
        spHeatMapsBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
        // Do not merge the first (batch) dimension here -- unlike the non-experimental
        // extractor, which uses true. TODO confirm rationale.
        const bool mergeFirstDimension = false;
        spResizeAndMergeCaffe->Reshape({spCaffeNetOutputBlob.get()}, {spHeatMapsBlob.get()}, HAND_CCN_DECREASE_FACTOR, mergeFirstDimension);
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // Pose extractor blob and layer
        spPeaksBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
        spNmsCaffe->Reshape({spHeatMapsBlob.get()}, {spPeaksBlob.get()}, HAND_MAX_PEAKS, HAND_NUMBER_PARTS+1);
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        log("Finished initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Placeholder forward pass: hand keypoint inference is not implemented yet,
// so the inputs are only marked as intentionally unused.
void HandExtractor::forwardPass(const Array<float>& poseKeyPoints, const cv::Mat& cvInputData)
{
    try
    {
        // No processing performed; silence unused-parameter warnings until the
        // implementation lands.
        UNUSED(cvInputData);
        UNUSED(poseKeyPoints);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Returns the most recently extracted hand keypoints.
// Thread-restricted: must be called from the thread that ran initializationOnThread().
Array<float> HandExtractor::getHandKeypoints() const
{
    try
    {
        checkThread();
        return mHands;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        // Fallback value (only reached if error() returns instead of throwing).
        return Array<float>{};
    }
}
// Reads the current value of the given runtime property (bounds-checked).
double HandExtractor::get(const HandsProperty property) const
{
    try
    {
        const auto propertyIndex = (int)property;
        return mProperties.at(propertyIndex);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return 0.;
    }
}
// Overwrites the given runtime property with `value` (bounds-checked).
void HandExtractor::set(const HandsProperty property, const double value)
{
    try
    {
        const auto propertyIndex = (int)property;
        mProperties.at(propertyIndex) = value;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Adds `value` to the given runtime property.
void HandExtractor::increase(const HandsProperty property, const double value)
{
    try
    {
        // Bug fix (defensive): the original wrote through unchecked operator[]
        // while reading through bounds-checked at(). Use at() for both accesses
        // so an out-of-range property can never write past the array.
        const auto propertyIndex = (int)property;
        mProperties.at(propertyIndex) = mProperties.at(propertyIndex) + value;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Asserts that the caller is the thread that ran initializationOnThread();
// the underlying CPU/GPU buffers must not be touched from other threads.
void HandExtractor::checkThread() const
{
    try
    {
        const auto callingThreadId = std::this_thread::get_id();
        if (callingThreadId != mThreadId)
            error("The CPU/GPU pointer data cannot be accessed from a different thread.", __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
}
}
#ifndef CPU_ONLY
#include <cuda.h>
#include <cuda_runtime_api.h>
#endif
#include <openpose/experimental/hand/renderHand.hpp>
#include <openpose/utilities/cuda.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/experimental/hand/handRenderer.hpp>
namespace op
{
namespace experimental
{
// Hand keypoint renderer: draws detected hand keypoints over the output frame.
// frameSize is the output resolution; alpha values control blending; renderMode
// selects the CPU or GPU drawing path.
HandRenderer::HandRenderer(const Point<int>& frameSize, const float alphaKeypoint, const float alphaHeatMap, const RenderMode renderMode) :
    // Base Renderer buffer holds one RGB frame (width * height * 3 values).
    Renderer{(unsigned long long)(frameSize.area() * 3), alphaKeypoint, alphaHeatMap},
    mFrameSize{frameSize},
    mRenderMode{renderMode}
{
}
// Releases the device-side keypoint buffer allocated in initializationOnThread().
HandRenderer::~HandRenderer()
{
    try
    {
        // Free CUDA pointers - Note that if pointers are 0 (i.e. nullptr), no operation is performed.
        #ifndef CPU_ONLY
            cudaFree(pGpuHands);
        #endif
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Allocates the GPU scratch buffer used to upload hand keypoints for rendering.
// Must run on the rendering worker thread.
void HandRenderer::initializationOnThread()
{
    try
    {
        log("Starting initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        Renderer::initializationOnThread();
        // GPU memory allocation for rendering
        #ifndef CPU_ONLY
            // Room for 2 hands x HAND_NUMBER_PARTS keypoints x (x, y, score).
            cudaMalloc((void**)(&pGpuHands), 2*HAND_NUMBER_PARTS * 3 * sizeof(float));
        #endif
        log("Finished initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Draws hand keypoints onto outputData, dispatching to the CPU or GPU
// implementation depending on the configured render mode.
void HandRenderer::renderHand(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        // Security checks
        if (outputData.empty())
            error("Empty Array<float> outputData.", __LINE__, __FUNCTION__, __FILE__);
        // Dispatch to the configured backend.
        const auto useCpuRenderer = (mRenderMode == RenderMode::Cpu);
        if (useCpuRenderer)
            renderHandCpu(outputData, handKeypoints);
        else
            renderHandGpu(outputData, handKeypoints);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// CPU rendering path: delegates to the shared hand keypoint drawing routine.
void HandRenderer::renderHandCpu(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        renderHandKeypointsCpu(outputData, handKeypoints);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// GPU rendering path: uploads keypoints to the device, draws them over the
// frame buffer, and downloads the result if this is the last renderer in the
// pipeline. Errors out at runtime when built with CPU_ONLY.
void HandRenderer::renderHandGpu(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        // GPU rendering
        #ifndef CPU_ONLY
            const auto elementRendered = spElementToRender->load(); // I prefer std::round(T&) over intRound(T) for std::atomic
            const auto numberPeople = handKeypoints.getSize(0);
            // GPU rendering
            if (numberPeople > 0 && elementRendered == 0)
            {
                cpuToGpuMemoryIfNotCopiedYet(outputData.getPtr());
                // Draw handKeypoints
                // NOTE(review): copies a fixed 2*HAND_NUMBER_PARTS keypoints regardless
                // of numberPeople -- confirm this matches the handKeypoints layout.
                cudaMemcpy(pGpuHands, handKeypoints.getConstPtr(), 2*HAND_NUMBER_PARTS*3 * sizeof(float), cudaMemcpyHostToDevice);
                renderHandKeypointsGpu(*spGpuMemoryPtr, mFrameSize, pGpuHands, handKeypoints.getSize(0));
                // CUDA check
                cudaCheck(__LINE__, __FUNCTION__, __FILE__);
            }
            // GPU memory to CPU if last renderer
            gpuToCpuMemoryIfLastRenderer(outputData.getPtr());
            cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // CPU_ONLY mode
        #else
            error("GPU rendering not available if `CPU_ONLY` is set.", __LINE__, __FUNCTION__, __FILE__);
            UNUSED(outputData);
            UNUSED(handKeypoints);
        #endif
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
}
}
#include <opencv2/opencv.hpp> // CV_WARP_INVERSE_MAP, CV_INTER_LINEAR
#include <openpose/pose/poseParameters.hpp>
#include <openpose/utilities/check.hpp>
#include <openpose/utilities/errorAndLog.hpp>
......@@ -18,7 +17,7 @@ namespace op
}
inline Rectangle<float> getFaceFromPoseKeypoints(const Array<float>& poseKeypoints, const unsigned int personIndex, const unsigned int neck,
const unsigned int nose, const unsigned int lEar, const unsigned int rEar,
const unsigned int headNose, const unsigned int lEar, const unsigned int rEar,
const unsigned int lEye, const unsigned int rEye, const float threshold)
{
try
......@@ -28,7 +27,7 @@ namespace op
const auto* posePtr = &poseKeypoints.at(personIndex*poseKeypoints.getSize(1)*poseKeypoints.getSize(2));
const auto neckScoreAbove = (posePtr[neck*3+2] > threshold);
const auto noseScoreAbove = (posePtr[nose*3+2] > threshold);
const auto headNoseScoreAbove = (posePtr[headNose*3+2] > threshold);
const auto lEarScoreAbove = (posePtr[lEar*3+2] > threshold);
const auto rEarScoreAbove = (posePtr[rEar*3+2] > threshold);
const auto lEyeScoreAbove = (posePtr[lEye*3+2] > threshold);
......@@ -36,45 +35,45 @@ namespace op
auto counter = 0;
// Face and neck given (e.g. MPI)
if (nose == lEar && lEar == rEar)
if (headNose == lEar && lEar == rEar)
{
if (neckScoreAbove && noseScoreAbove)
if (neckScoreAbove && headNoseScoreAbove)
{
pointTopLeft.x = posePtr[nose*3];
pointTopLeft.y = posePtr[nose*3+1];
faceSize = 1.33f * getDistance(posePtr, neck, nose);
pointTopLeft.x = posePtr[headNose*3];
pointTopLeft.y = posePtr[headNose*3+1];
faceSize = 1.33f * getDistance(posePtr, neck, headNose);
}
}
// Face as average between different body keypoints (e.g. COCO)
else
{
// factor * dist(neck, nose)
if (neckScoreAbove && noseScoreAbove)
// factor * dist(neck, headNose)
if (neckScoreAbove && headNoseScoreAbove)
{
// If profile (i.e. only 1 eye and ear visible) --> avg(nose, eye & ear position)
// If profile (i.e. only 1 eye and ear visible) --> avg(headNose, eye & ear position)
if ((lEyeScoreAbove) == (lEarScoreAbove)
&& (rEyeScoreAbove) == (rEarScoreAbove)
&& (lEyeScoreAbove) != (rEyeScoreAbove))
{
if (lEyeScoreAbove)
{
pointTopLeft.x += (posePtr[lEye*3] + posePtr[lEar*3] + posePtr[nose*3]) / 3.f;
pointTopLeft.y += (posePtr[lEye*3+1] + posePtr[lEar*3+1] + posePtr[nose*3+1]) / 3.f;
faceSize += 0.85 * (getDistance(posePtr, nose, lEye) + getDistance(posePtr, nose, lEar) + getDistance(posePtr, neck, nose));
pointTopLeft.x += (posePtr[lEye*3] + posePtr[lEar*3] + posePtr[headNose*3]) / 3.f;
pointTopLeft.y += (posePtr[lEye*3+1] + posePtr[lEar*3+1] + posePtr[headNose*3+1]) / 3.f;
faceSize += 0.85f * (getDistance(posePtr, headNose, lEye) + getDistance(posePtr, headNose, lEar) + getDistance(posePtr, neck, headNose));
}
else // if(lEyeScoreAbove)
{
pointTopLeft.x += (posePtr[rEye*3] + posePtr[rEar*3] + posePtr[nose*3]) / 3.f;
pointTopLeft.y += (posePtr[rEye*3+1] + posePtr[rEar*3+1] + posePtr[nose*3+1]) / 3.f;
faceSize += 0.85 * (getDistance(posePtr, nose, rEye) + getDistance(posePtr, nose, rEar) + getDistance(posePtr, neck, nose));
pointTopLeft.x += (posePtr[rEye*3] + posePtr[rEar*3] + posePtr[headNose*3]) / 3.f;
pointTopLeft.y += (posePtr[rEye*3+1] + posePtr[rEar*3+1] + posePtr[headNose*3+1]) / 3.f;
faceSize += 0.85f * (getDistance(posePtr, headNose, rEye) + getDistance(posePtr, headNose, rEar) + getDistance(posePtr, neck, headNose));
}
}
// else --> 2 * dist(neck, nose)
// else --> 2 * dist(neck, headNose)
else
{
pointTopLeft.x += (posePtr[neck*3] + posePtr[nose*3]) / 2.f;
pointTopLeft.y += (posePtr[neck*3+1] + posePtr[nose*3+1]) / 2.f;
faceSize += 2.f * getDistance(posePtr, neck, nose);
pointTopLeft.x += (posePtr[neck*3] + posePtr[headNose*3]) / 2.f;
pointTopLeft.y += (posePtr[neck*3+1] + posePtr[headNose*3+1]) / 2.f;
faceSize += 2.f * getDistance(posePtr, neck, headNose);
}
counter++;
}
......@@ -110,7 +109,7 @@ namespace op
}
}
std::vector<Rectangle<float>> FaceDetector::detectFaces(const Array<float>& poseKeypoints, const float scaleInputToOutput)
std::vector<Rectangle<float>> FaceDetector::detectFaces(const Array<float>& poseKeypoints, const float scaleInputToOutput) const
{
try
{
......
......@@ -78,6 +78,9 @@ namespace op
if (cvInputData.empty())
error("Empty cvInputData.", __LINE__, __FUNCTION__, __FILE__);
// Fix parameters
const auto netInputSide = fastMin(mNetOutputSize.x, mNetOutputSize.y);
// Set face size
const auto numberPeople = (int)faceRectangles.size();
mFaceKeypoints.reset({numberPeople, (int)FACE_NUMBER_PARTS, 3}, 0);
......@@ -87,8 +90,9 @@ namespace op
// Extract face keypoints for each person
for (auto person = 0 ; person < numberPeople ; person++)
{
const auto& faceRectangle = faceRectangles.at(person);
// Only consider faces with a minimum pixel area
const auto faceAreaSquared = std::sqrt(faceRectangles.at(person).area());
const auto minFaceSize = fastMin(faceRectangle.width, faceRectangle.height);
// // Debugging
// log(std::to_string(cvInputData.cols) + " " + std::to_string(cvInputData.rows));
// cv::rectangle(cvInputDataCopy,
......@@ -96,23 +100,26 @@ namespace op
// cv::Point{(int)faceRectangle.bottomRight().x, (int)faceRectangle.bottomRight().y},
// cv::Scalar{0,0,255}, 2);
// Get parts
if (faceAreaSquared > 50)
if (minFaceSize > 40)
{
const auto& faceRectangle = faceRectangles.at(person);
// Get face position(s)
const Point<float> faceCenterPosition{faceRectangle.topLeft()};
const auto faceSize = fastMax(faceRectangle.width, faceRectangle.height);
// // Debugging
// log(std::to_string(cvInputData.cols) + " " + std::to_string(cvInputData.rows));
// cv::rectangle(cvInputDataCopy,
// cv::Point{(int)faceRectangle.x, (int)faceRectangle.y},
// cv::Point{(int)faceRectangle.bottomRight().x, (int)faceRectangle.bottomRight().y},
// cv::Scalar{0,255,0}, 2);
// Resize and shift image to face rectangle positions
const double scaleFace = faceSize / (double)fastMin(mNetOutputSize.x, mNetOutputSize.y);
const auto faceSize = fastMax(faceRectangle.width, faceRectangle.height);
const double scaleFace = faceSize / (double)netInputSide;
cv::Mat Mscaling = cv::Mat::eye(2, 3, CV_64F);
Mscaling.at<double>(0,0) = scaleFace;
Mscaling.at<double>(1,1) = scaleFace;
Mscaling.at<double>(0,2) = faceCenterPosition.x;
Mscaling.at<double>(1,2) = faceCenterPosition.y;
Mscaling.at<double>(0,2) = faceRectangle.x;
Mscaling.at<double>(1,2) = faceRectangle.y;
cv::Mat faceImage;
cv::warpAffine(cvInputData, faceImage, Mscaling, cv::Size{mNetOutputSize.x, mNetOutputSize.y}, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(0,0,0));
cv::warpAffine(cvInputData, faceImage, Mscaling, cv::Size{mNetOutputSize.x, mNetOutputSize.y},
CV_INTER_LINEAR | CV_WARP_INVERSE_MAP, cv::BORDER_CONSTANT, cv::Scalar(0,0,0));
// cv::Mat -> float*
uCharCvMatToFloatPtr(mFaceImageCrop.getPtr(), faceImage, true);
......@@ -146,7 +153,7 @@ namespace op
const auto* facePeaksPtr = spPeaksBlob->mutable_cpu_data();
const auto facePeaksOffset = (FACE_MAX_PEAKS+1) * 3;
for (auto part = 0 ; part < FACE_NUMBER_PARTS ; part++)
for (auto part = 0 ; part < mFaceKeypoints.getSize(1) ; part++)
{
// Get max peak
const int numPeaks = intRound(facePeaksPtr[facePeaksOffset*part]);
......
#include <openpose/hand/headers.hpp>
namespace op
{
    // Explicit template instantiations of the hand worker wrappers for the
    // default Datum type (DEFINE_TEMPLATE_DATUM comes from the included headers).
    DEFINE_TEMPLATE_DATUM(WHandDetector);
    DEFINE_TEMPLATE_DATUM(WHandExtractor);
    DEFINE_TEMPLATE_DATUM(WHandRenderer);
}
#include <openpose/pose/poseParameters.hpp>
#include <openpose/utilities/check.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/fastMath.hpp>
#include <openpose/utilities/keypoint.hpp>
#include <openpose/hand/handDetector.hpp>
namespace op
{
// Estimates the left/right hand bounding boxes of one person from its arm
// keypoints: the hand center is extrapolated past the wrist along the
// elbow->wrist direction, and the (square) box side is derived from the
// forearm and upper-arm lengths.
// Returns {leftHandRectangle, rightHandRectangle}; a hand whose wrist, elbow
// or shoulder score is not above `threshold` keeps its default rectangle.
inline std::array<Rectangle<float>, 2> getHandFromPoseIndexes(const Array<float>& poseKeypoints, const unsigned int personIndex, const unsigned int lWrist,
                                                              const unsigned int lElbow, const unsigned int lShoulder, const unsigned int rWrist,
                                                              const unsigned int rElbow, const unsigned int rShoulder, const float threshold)
{
    try
    {
        std::array<Rectangle<float>, 2> handRectangle;
        // Keypoints of this person: 3 channels (x, y, score) per body part.
        const auto* posePtr = &poseKeypoints.at(personIndex*poseKeypoints.getSize(1)*poseKeypoints.getSize(2));
        const auto lWristScoreAbove = (posePtr[lWrist*3+2] > threshold);
        const auto lElbowScoreAbove = (posePtr[lElbow*3+2] > threshold);
        const auto lShoulderScoreAbove = (posePtr[lShoulder*3+2] > threshold);
        const auto rWristScoreAbove = (posePtr[rWrist*3+2] > threshold);
        const auto rElbowScoreAbove = (posePtr[rElbow*3+2] > threshold);
        const auto rShoulderScoreAbove = (posePtr[rShoulder*3+2] > threshold);
        // const auto neckScoreAbove = (posePtr[neck*3+2] > threshold);
        // const auto headNoseScoreAbove = (posePtr[headNose*3+2] > threshold);
        // Fraction of the forearm length by which the hand center is shifted
        // beyond the wrist.
        const auto ratio = 0.33f;
        auto& handLeftRectangle = handRectangle.at(0);
        auto& handRightRectangle = handRectangle.at(1);
        // Left hand
        if (lWristScoreAbove && lElbowScoreAbove && lShoulderScoreAbove)
        {
            // Extrapolate the hand center along the elbow->wrist direction.
            handLeftRectangle.x = posePtr[lWrist*3] + ratio * (posePtr[lWrist*3] - posePtr[lElbow*3]);
            handLeftRectangle.y = posePtr[lWrist*3+1] + ratio * (posePtr[lWrist*3+1] - posePtr[lElbow*3+1]);
            const auto distanceWristElbow = getDistance(posePtr, lWrist, lElbow);
            const auto distanceElbowShoulder = getDistance(posePtr, lElbow, lShoulder);
            // const auto distanceWristShoulder = getDistance(posePtr, lWrist, lShoulder);
            // if (distanceWristElbow / distanceElbowShoulder > 0.85)
                handLeftRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
            // else
            //     handLeftRectangle.width = 1.5f * 0.9f * distanceElbowShoulder * fastMin(distanceElbowShoulder / distanceWristElbow, 3.f);
            // somehow --> if distanceWristShoulder ~ distanceElbowShoulder --> do zoom in
        }
        // Right hand
        if (rWristScoreAbove && rElbowScoreAbove && rShoulderScoreAbove)
        {
            handRightRectangle.x = posePtr[rWrist*3] + ratio * (posePtr[rWrist*3] - posePtr[rElbow*3]);
            handRightRectangle.y = posePtr[rWrist*3+1] + ratio * (posePtr[rWrist*3+1] - posePtr[rElbow*3+1]);
            handRightRectangle.width = 1.5f * fastMax(getDistance(posePtr, rWrist, rElbow), 0.9f * getDistance(posePtr, rElbow, rShoulder));
        }
        // Make the boxes square and convert the center into a top-left corner.
        handLeftRectangle.height = handLeftRectangle.width;
        handLeftRectangle.x -= handLeftRectangle.width / 2.f;
        handLeftRectangle.y -= handLeftRectangle.height / 2.f;
        handRightRectangle.height = handRightRectangle.width;
        handRightRectangle.x -= handRightRectangle.width / 2.f;
        handRightRectangle.y -= handRightRectangle.height / 2.f;
        return handRectangle;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return std::array<Rectangle<float>, 2>{};
    }
}
// Caches the keypoint indexes of the 6 arm parts (wrists/elbows/shoulders)
// for the given pose model, so they are looked up only once.
HandDetector::HandDetector(const PoseModel poseModel) :
    mPoseIndexes{getPoseKeypoints(poseModel, {"LWrist", "LElbow", "LShoulder", "RWrist", "RElbow", "RShoulder"})}
{
}
// Estimates one {left, right} hand rectangle pair per detected person from the
// body pose keypoints, scaled into output-image coordinates.
// Returns an empty vector entry pair when keypoints are missing or on error.
std::vector<std::array<Rectangle<float>, 2>> HandDetector::detectHands(const Array<float>& poseKeypoints, const float scaleInputToOutput) const
{
    try
    {
        const auto numberPeople = poseKeypoints.getSize(0);
        const auto threshold = 0.25f;
        std::vector<std::array<Rectangle<float>, 2>> handRectangles(numberPeople);
        // Without body keypoints there is no way to infer hand locations.
        if (!poseKeypoints.empty())
        {
            for (auto person = 0 ; person < numberPeople ; person++)
            {
                auto& personRectangles = handRectangles.at(person);
                personRectangles = getHandFromPoseIndexes(
                    poseKeypoints, person, mPoseIndexes[(int)PosePart::LWrist], mPoseIndexes[(int)PosePart::LElbow],
                    mPoseIndexes[(int)PosePart::LShoulder], mPoseIndexes[(int)PosePart::RWrist],
                    mPoseIndexes[(int)PosePart::RElbow], mPoseIndexes[(int)PosePart::RShoulder], threshold
                );
                // Map from net input coordinates into output coordinates.
                for (auto& handRectangle : personRectangles)
                    handRectangle /= scaleInputToOutput;
            }
        }
        return handRectangles;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return std::vector<std::array<Rectangle<float>, 2>>{};
    }
}
// Tracking is not implemented yet; this currently falls back to plain
// frame-by-frame detection via detectHands().
std::vector<std::array<Rectangle<float>, 2>> HandDetector::trackHands(const Array<float>& poseKeypoints, const float scaleInputToOutput)
{
    try
    {
        return detectHands(poseKeypoints, scaleInputToOutput);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return std::vector<std::array<Rectangle<float>, 2>>{};
    }
}
// Records the tracked arm keypoints (and, eventually, hand rectangles) of the
// current frame so the next trackHands() call can exploit temporal coherence.
// poseKeypoints and handKeypoints must describe the same number of people.
void HandDetector::updateTracker(const Array<float>& poseKeypoints, const Array<float>& handKeypoints)
{
    try
    {
        // Security checks
        if (poseKeypoints.getSize(0) != handKeypoints.getSize(0))
            error("Number people on poseKeypoints different than in handKeypoints.", __LINE__, __FUNCTION__, __FILE__);
        // Parameters
        const auto numberPeople = poseKeypoints.getSize(0);
        const auto numberParts = poseKeypoints.getSize(1);
        const auto numberChannels = poseKeypoints.getSize(2);
        // Update pose keypoints and hand rectangles
        mPoseTrack.resize(numberPeople);
        mHandTrack.resize(numberPeople);
        for (auto personIndex = 0u ; personIndex < mPoseTrack.size() ; personIndex++)
        {
            // Update pose keypoints
            const auto* posePtr = &poseKeypoints.at(personIndex * numberParts * numberChannels);
            for (auto j = 0u ; j < mPoseIndexes.size() ; j++)
            {
                // Bug fix: the y coordinate must come from the same body part
                // (same base offset + 1), not from the x value of the NEXT
                // tracked part (`mPoseIndexes[j+1]`), which in addition read
                // out of bounds for the last element of mPoseIndexes.
                const auto baseIndex = numberChannels * mPoseIndexes[j];
                mPoseTrack[personIndex][j] = Point<float>{posePtr[baseIndex], posePtr[baseIndex + 1]};
            }
            // Update hand rectangles
            // for (auto j = 0 ; j < mPoseIndexes.size() ; j++)
            //     mHandTrack[personIndex] = XXXXXXXXXXXXXXXXXx;
        }
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Maps each body-part name in `poseStrings` to its keypoint index for the
// given pose model. Used once at construction to cache the arm-part indexes.
std::array<unsigned int, (int)HandDetector::PosePart::Size> HandDetector::getPoseKeypoints(const PoseModel poseModel,
                                                                                           const std::array<std::string,
                                                                                                            (int)HandDetector::PosePart::Size>& poseStrings
                                                                                          )
{
    std::array<unsigned int, (int)PosePart::Size> poseKeypoints;
    // Fix: use an unsigned index to avoid the signed/unsigned comparison with
    // std::array::size() that the original `auto i = 0` (int) produced.
    for (auto i = 0u ; i < poseKeypoints.size() ; i++)
        poseKeypoints.at(i) = poseBodyPartMapStringToKey(poseModel, poseStrings.at(i));
    return poseKeypoints;
}
}
#include <opencv2/opencv.hpp> // CV_WARP_INVERSE_MAP, CV_INTER_LINEAR
#include <openpose/core/netCaffe.hpp>
#include <openpose/hand/handParameters.hpp>
#include <openpose/utilities/check.hpp>
#include <openpose/utilities/cuda.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/fastMath.hpp>
#include <openpose/utilities/openCv.hpp>
#include <openpose/hand/handExtractor.hpp>
namespace op
{
// Hand keypoint extractor. NOT functional yet: the constructor body aborts via
// error() below. Configures the Caffe net for a single square RGB hand crop.
// Requires netInputSize == netOutputSize and a squared net input.
HandExtractor::HandExtractor(const Point<int>& netInputSize, const Point<int>& netOutputSize, const std::string& modelFolder, const int gpuId) :
    mNetOutputSize{netOutputSize},
    // Net input shape {1, 3, h, w}: batch of 1 RGB crop at the net resolution.
    spNet{std::make_shared<NetCaffe>(std::array<int,4>{1, 3, mNetOutputSize.y, mNetOutputSize.x}, modelFolder + HAND_PROTOTXT, modelFolder + HAND_TRAINED_MODEL, gpuId)},
    spResizeAndMergeCaffe{std::make_shared<ResizeAndMergeCaffe<float>>()},
    spNmsCaffe{std::make_shared<NmsCaffe<float>>()},
    // Buffer for one RGB hand crop (width * height * 3 channels).
    mHandImageCrop{mNetOutputSize.area()*3}
{
    try
    {
        // Feature is not released yet; fail fast on construction.
        error("Hands extraction is not implemented yet. COMING SOON!", __LINE__, __FUNCTION__, __FILE__);
        // Only equal, squared input/output sizes are supported.
        checkE(netOutputSize.x, netInputSize.x, "Net input and output size must be equal.", __LINE__, __FUNCTION__, __FILE__);
        checkE(netOutputSize.y, netInputSize.y, "Net input and output size must be equal.", __LINE__, __FUNCTION__, __FILE__);
        checkE(netInputSize.x, netInputSize.y, "Net input size must be squared.", __LINE__, __FUNCTION__, __FILE__);
        // Properties
        for (auto& property : mProperties)
            property = 0.;
        mProperties[(int)HandProperty::NMSThreshold] = HAND_DEFAULT_NMS_THRESHOLD;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Per-thread setup. Must run on the worker thread that will later call
// forwardPass()/getHandKeypoints(): the Caffe/CUDA buffers created here are
// bound to that thread (enforced by checkThread()).
void HandExtractor::initializationOnThread()
{
    try
    {
        log("Starting initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        // Get thread id
        mThreadId = {std::this_thread::get_id()};
        // Caffe net
        spNet->initializationOnThread();
        spCaffeNetOutputBlob = ((NetCaffe*)spNet.get())->getOutputBlob();
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // HeatMaps extractor blob and layer
        spHeatMapsBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
        // Merge the first (batch) dimension -- unlike the experimental
        // extractor, which uses false. TODO confirm rationale.
        const bool mergeFirstDimension = true;
        spResizeAndMergeCaffe->Reshape({spCaffeNetOutputBlob.get()}, {spHeatMapsBlob.get()}, HAND_CCN_DECREASE_FACTOR, mergeFirstDimension);
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // Pose extractor blob and layer
        spPeaksBlob = {std::make_shared<caffe::Blob<float>>(1,1,1,1)};
        spNmsCaffe->Reshape({spHeatMapsBlob.get()}, {spPeaksBlob.get()}, HAND_MAX_PEAKS, HAND_NUMBER_PARTS+1);
        cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        log("Finished initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Placeholder forward pass: hand keypoint extraction is not implemented yet,
// so this always aborts via error() before doing any work.
void HandExtractor::forwardPass(const std::vector<std::array<Rectangle<float>, 2>> handRectangles, const cv::Mat& cvInputData, const float scaleInputToOutput)
{
    try
    {
        error("Hands extraction is not implemented yet. COMING SOON!", __LINE__, __FUNCTION__, __FILE__);
        // Mark the inputs as intentionally unused until the implementation lands.
        UNUSED(scaleInputToOutput);
        UNUSED(cvInputData);
        UNUSED(handRectangles);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Returns the most recently extracted hand keypoints.
// Thread-restricted: must be called from the thread that ran initializationOnThread().
Array<float> HandExtractor::getHandKeypoints() const
{
    try
    {
        checkThread();
        return mHandKeypoints;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        // Fallback value (only reached if error() returns instead of throwing).
        return Array<float>{};
    }
}
// Reads the current value of the given runtime property (bounds-checked).
double HandExtractor::get(const HandProperty property) const
{
    try
    {
        const auto propertyIndex = (int)property;
        return mProperties.at(propertyIndex);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
        return 0.;
    }
}
// Overwrites the given runtime property with `value` (bounds-checked).
void HandExtractor::set(const HandProperty property, const double value)
{
    try
    {
        const auto propertyIndex = (int)property;
        mProperties.at(propertyIndex) = value;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Adds `value` to the given runtime property.
void HandExtractor::increase(const HandProperty property, const double value)
{
    try
    {
        // Bug fix (defensive): the original wrote through unchecked operator[]
        // while reading through bounds-checked at(). Use at() for both accesses
        // so an out-of-range property can never write past the array.
        const auto propertyIndex = (int)property;
        mProperties.at(propertyIndex) = mProperties.at(propertyIndex) + value;
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Asserts that the caller is the thread that ran initializationOnThread();
// the underlying CPU/GPU buffers must not be touched from other threads.
void HandExtractor::checkThread() const
{
    try
    {
        const auto callingThreadId = std::this_thread::get_id();
        if (callingThreadId != mThreadId)
            error("The CPU/GPU pointer data cannot be accessed from a different thread.", __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
}
#ifndef CPU_ONLY
#include <cuda.h>
#include <cuda_runtime_api.h>
#endif
#include <openpose/hand/handParameters.hpp>
#include <openpose/hand/renderHand.hpp>
#include <openpose/utilities/cuda.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/hand/handRenderer.hpp>
namespace op
{
// Hand keypoint renderer: draws detected hand keypoints over the output frame.
// frameSize is the output resolution; alpha values control blending; renderMode
// selects the CPU or GPU drawing path.
HandRenderer::HandRenderer(const Point<int>& frameSize, const float alphaKeypoint, const float alphaHeatMap, const RenderMode renderMode) :
    // Base Renderer buffer holds one RGB frame (width * height * 3 values).
    Renderer{(unsigned long long)(frameSize.area() * 3), alphaKeypoint, alphaHeatMap},
    mFrameSize{frameSize},
    mRenderMode{renderMode}
{
}
// Releases the device-side keypoint buffer allocated in initializationOnThread().
HandRenderer::~HandRenderer()
{
    try
    {
        // Free CUDA pointers - Note that if pointers are 0 (i.e. nullptr), no operation is performed.
        #ifndef CPU_ONLY
            cudaFree(pGpuHand);
        #endif
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Allocates the GPU scratch buffer used to upload hand keypoints for rendering.
// Must run on the rendering worker thread.
void HandRenderer::initializationOnThread()
{
    try
    {
        log("Starting initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
        Renderer::initializationOnThread();
        // GPU memory allocation for rendering
        #ifndef CPU_ONLY
            // Room for 2 hands x HAND_NUMBER_PARTS keypoints x (x, y, score).
            cudaMalloc((void**)(&pGpuHand), 2*HAND_NUMBER_PARTS * 3 * sizeof(float));
        #endif
        log("Finished initialization on thread.", Priority::Low, __LINE__, __FUNCTION__, __FILE__);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// Draws hand keypoints onto outputData, dispatching to the CPU or GPU
// implementation depending on the configured render mode.
void HandRenderer::renderHand(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        // Security checks
        if (outputData.empty())
            error("Empty Array<float> outputData.", __LINE__, __FUNCTION__, __FILE__);
        // Dispatch to the configured backend.
        const auto useCpuRenderer = (mRenderMode == RenderMode::Cpu);
        if (useCpuRenderer)
            renderHandCpu(outputData, handKeypoints);
        else
            renderHandGpu(outputData, handKeypoints);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// CPU rendering path: delegates to the shared hand keypoint drawing routine.
void HandRenderer::renderHandCpu(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        renderHandKeypointsCpu(outputData, handKeypoints);
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
// GPU rendering path: uploads keypoints to the device, draws them over the
// frame buffer, and downloads the result if this is the last renderer in the
// pipeline. Errors out at runtime when built with CPU_ONLY.
void HandRenderer::renderHandGpu(Array<float>& outputData, const Array<float>& handKeypoints)
{
    try
    {
        // GPU rendering
        #ifndef CPU_ONLY
            const auto elementRendered = spElementToRender->load(); // I prefer std::round(T&) over intRound(T) for std::atomic
            const auto numberPeople = handKeypoints.getSize(0);
            // GPU rendering
            if (numberPeople > 0 && elementRendered == 0)
            {
                cpuToGpuMemoryIfNotCopiedYet(outputData.getPtr());
                // Draw handKeypoints
                // NOTE(review): copies a fixed 2*HAND_NUMBER_PARTS keypoints regardless
                // of numberPeople -- confirm this matches the handKeypoints layout.
                cudaMemcpy(pGpuHand, handKeypoints.getConstPtr(), 2*HAND_NUMBER_PARTS*3 * sizeof(float), cudaMemcpyHostToDevice);
                renderHandKeypointsGpu(*spGpuMemoryPtr, mFrameSize, pGpuHand, handKeypoints.getSize(0));
                // CUDA check
                cudaCheck(__LINE__, __FUNCTION__, __FILE__);
            }
            // GPU memory to CPU if last renderer
            gpuToCpuMemoryIfLastRenderer(outputData.getPtr());
            cudaCheck(__LINE__, __FUNCTION__, __FILE__);
        // CPU_ONLY mode
        #else
            error("GPU rendering not available if `CPU_ONLY` is set.", __LINE__, __FUNCTION__, __FILE__);
            UNUSED(outputData);
            UNUSED(handKeypoints);
        #endif
    }
    catch (const std::exception& e)
    {
        error(e.what(), __LINE__, __FUNCTION__, __FILE__);
    }
}
}
#include <openpose/experimental/hand/handParameters.hpp>
#include <openpose/hand/handParameters.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/fastMath.hpp>
#include <openpose/utilities/keypoint.hpp>
#include <openpose/experimental/hand/renderHand.hpp>
#include <openpose/hand/renderHand.hpp>
namespace op
{
......@@ -15,7 +15,7 @@ namespace op
if (!frameArray.empty())
{
// Parameters
const auto thicknessCircleRatio = 1.f/200.f;
const auto thicknessCircleRatio = 1.f/50.f;
const auto thicknessLineRatioWRTCircle = 0.75f;
const auto& pairs = HAND_PAIRS_RENDER;
......
#include <openpose/experimental/hand/handParameters.hpp>
#include <openpose/hand/handParameters.hpp>
#include <openpose/utilities/errorAndLog.hpp>
#include <openpose/utilities/cuda.hpp>
#include <openpose/utilities/cuda.hu>
#include <openpose/utilities/render.hu>
#include <openpose/experimental/hand/renderHand.hpp>
#include <openpose/hand/renderHand.hpp>
namespace op
{
......@@ -12,8 +12,9 @@ namespace op
__global__ void renderHandsParts(float* targetPtr, const int targetWidth, const int targetHeight, const float* const handsPtr,
const int numberHands, const float threshold, const float alphaColorToAdd)
__global__ void renderHandsParts(float* targetPtr, const int targetWidth, const int targetHeight,
const float* const handsPtr, const int numberHands,
const float threshold, const float alphaColorToAdd)
{
const auto x = (blockIdx.x * blockDim.x) + threadIdx.x;
const auto y = (blockIdx.y * blockDim.y) + threadIdx.y;
......@@ -48,8 +49,8 @@ namespace op
dim3 threadsPerBlock;
dim3 numBlocks;
std::tie(threadsPerBlock, numBlocks) = getNumberCudaThreadsAndBlocks(frameSize);
renderHandsParts<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, handsPtr, numberHands, threshold,
alphaColorToAdd);
renderHandsParts<<<threadsPerBlock, numBlocks>>>(framePtr, frameSize.x, frameSize.y, handsPtr,
numberHands, threshold, alphaColorToAdd);
cudaCheck(__LINE__, __FUNCTION__, __FILE__);
}
}
......
......@@ -7,8 +7,9 @@
namespace op
{
template <typename T>
void connectBodyPartsCpu(Array<T>& poseKeypoints, const T* const heatMapPtr, const T* const peaksPtr, const PoseModel poseModel, const Point<int>& heatMapSize, const int maxPeaks,
const int interMinAboveThreshold, const T interThreshold, const int minSubsetCnt, const T minSubsetScore, const T scaleFactor)
void connectBodyPartsCpu(Array<T>& poseKeypoints, const T* const heatMapPtr, const T* const peaksPtr, const PoseModel poseModel,
const Point<int>& heatMapSize, const int maxPeaks, const int interMinAboveThreshold,
const T interThreshold, const int minSubsetCnt, const T minSubsetScore, const T scaleFactor)
{
try
{
......
......@@ -43,8 +43,8 @@ namespace op
}
}
PoseExtractor::PoseExtractor(const Point<int>& netOutputSize, const Point<int>& outputSize, const PoseModel poseModel, const std::vector<HeatMapType>& heatMapTypes,
const ScaleMode heatMapScale) :
PoseExtractor::PoseExtractor(const Point<int>& netOutputSize, const Point<int>& outputSize, const PoseModel poseModel,
const std::vector<HeatMapType>& heatMapTypes, const ScaleMode heatMapScale) :
mPoseModel{poseModel},
mNetOutputSize{netOutputSize},
mOutputSize{outputSize},
......
......@@ -2,11 +2,13 @@
namespace op
{
namespace experimental
WrapperStructHand::WrapperStructHand(const bool enable_, const Point<int>& netInputSize_, const RenderMode renderMode_,
const float alphaKeypoint_, const float alphaHeatMap_) :
enable{enable_},
netInputSize{netInputSize_},
renderMode{renderMode_},
alphaKeypoint{alphaKeypoint_},
alphaHeatMap{alphaHeatMap_}
{
WrapperStructHand::WrapperStructHand(const bool extractAndRenderHands_) :
extractAndRenderHands{extractAndRenderHands_}
{
}
}
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册