"""Pose estimation on a video file using MediaPipe Pose.

Reads frames from a video, runs MediaPipe pose detection on each frame,
draws the detected skeleton and every landmark, overlays the current FPS,
and displays the result until the video ends or the user presses 'q'/ESC.
"""
import time

import cv2
import mediapipe as mp

# Default input clip; kept as the original hard-coded path but now
# parameterized through main() so other clips can be processed.
VIDEO_PATH = '1.mp4'


def main(video_path: str = VIDEO_PATH) -> None:
    """Run the pose-estimation display loop on *video_path*.

    Side effects: opens an OpenCV window named "Image", reads the video
    file, and prints nothing (the original per-landmark print spam was
    removed — it flooded the console and dominated frame time).
    """
    mp_pose = mp.solutions.pose
    mp_draw = mp.solutions.drawing_utils
    cap = cv2.VideoCapture(video_path)
    prev_time = 0.0
    try:
        # Pose supports the context-manager protocol and releases its
        # native resources on exit.
        with mp_pose.Pose() as pose:
            while True:
                success, img = cap.read()
                if not success:  # end of stream or read failure
                    break

                # MediaPipe expects RGB input; OpenCV delivers BGR.
                results = pose.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

                if results.pose_landmarks:
                    mp_draw.draw_landmarks(
                        img, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
                    # Loop-invariant: frame size does not change per landmark.
                    h, w, _ = img.shape
                    for lm in results.pose_landmarks.landmark:
                        # Landmark coords are normalized [0, 1]; scale to pixels.
                        cx, cy = int(lm.x * w), int(lm.y * h)
                        cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)

                now = time.time()
                # BUG FIX: guard the division — with prev_time == 0 the first
                # frame reported a bogus huge FPS, and two frames in the same
                # clock tick raised ZeroDivisionError.
                fps = 1.0 / (now - prev_time) if now > prev_time else 0.0
                prev_time = now
                cv2.putText(img, str(int(fps)), (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 3)

                cv2.imshow("Image", img)
                # BUG FIX: the original read the key but never acted on it,
                # leaving no way to quit before the video finished.
                key = cv2.waitKey(1) & 0xFF
                if key in (ord('q'), 27):  # 'q' or ESC quits
                    break
    finally:
        # Exception-safe cleanup (original cleanup was skipped on error).
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()