Commit eb25b19

write pose json

Arash Hosseini committed Jan 24, 2019
1 parent b119759 commit eb25b19

Showing 3 changed files with 30 additions and 7 deletions.

4 changes: 2 additions & 2 deletions run.py

@@ -27,7 +27,7 @@
                         help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
     parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                         help='if provided, resize heatmaps before they are post-processed. default=1.0')
-
+    parser.add_argument('--output_json', type=str, default='/tmp/', help='writing output json dir')
     args = parser.parse_args()

     w, h = model_wh(args.resize)

@@ -47,7 +47,7 @@

     logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

-    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
+    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False, frame=0, output_json_dir=args.output_json)

     import matplotlib.pyplot as plt
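
With the new flag in place, the single-image demo can write its keypoints alongside the rendered output. A minimal invocation sketch, assuming the existing --image and --model arguments of run.py and an output directory that already exists (draw_humans does not create it):

python run.py --model=mobilenet_thin --image=./images/p1.jpg --output_json=./keypoints/

Because run.py passes frame=0, this produces a single file named 000000000000_keypoints.json in that directory.
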
10 changes: 9 additions & 1 deletion run_webcam.py

@@ -29,12 +29,16 @@
                         help='if provided, resize heatmaps before they are post-processed. default=1.0')

     parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
+
+    parser.add_argument('--output_json', type=str, default='/tmp/', help='writing output json dir')
+
     parser.add_argument('--show-process', type=bool, default=False,
                         help='for debug purpose, if enabled, speed for inference is dropped.')
     args = parser.parse_args()

     logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
     w, h = model_wh(args.resize)
+
     if w > 0 and h > 0:
         e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
     else:

@@ -44,14 +48,18 @@
     ret_val, image = cam.read()
     logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))

+    frame = 0
     while True:
         ret_val, image = cam.read()

         logger.debug('image process+')
         humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)

         logger.debug('postprocess+')
-        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
+
+
+        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False, frame=frame, output_json_dir=args.output_json)
+        frame += 1

         logger.debug('show+')
         cv2.putText(image,
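
Each pass through the webcam loop therefore writes one file per captured frame into the output directory: 000000000000_keypoints.json, 000000000001_keypoints.json, and so on. The layout resembles OpenPose's per-frame JSON, except that pose_keypoints_2d here holds 36 values (18 x/y pairs, no confidence scores). A sketch of one file's contents, truncated and with purely illustrative coordinates for a single detected person:

{
  "people": [
    {
      "pose_keypoints_2d": [241, 77, 239, 142, 189, 144, 0.0, 0.0, ...]
    }
  ]
}

Joints the estimator did not find keep their initial 0.0 values.
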
23 changes: 19 additions & 4 deletions tf_pose/estimator.py

@@ -7,7 +7,8 @@
 import numpy as np
 import tensorflow as tf
 import time
-
+import json
+import os
 from tf_pose import common
 from tf_pose.common import CocoPart
 from tf_pose.tensblur.smoother import Smoother

@@ -303,6 +304,7 @@ class TfPoseEstimator:
     def __init__(self, graph_path, target_size=(320, 240), tf_config=None):
         self.target_size = target_size

+
         # load graph
         logger.info('loading graph from %s(default size=%dx%d)' % (graph_path, target_size[0], target_size[1]))
         with tf.gfile.GFile(graph_path, 'rb') as f:

@@ -378,12 +380,14 @@ def _quantize_img(npimg):
         return npimg_q

     @staticmethod
-    def draw_humans(npimg, humans, imgcopy=False):
+    def draw_humans(npimg, humans, imgcopy=False, frame=0, output_json_dir=None):
         if imgcopy:
             npimg = np.copy(npimg)
         image_h, image_w = npimg.shape[:2]
+        dc = {"people":[]}
         centers = {}
-        for human in humans:
+        for n, human in enumerate(humans):
+            flat = [0.0 for i in range(36)]
             # draw point
             for i in range(common.CocoPart.Background.value):
                 if i not in human.body_parts.keys():

@@ -392,7 +396,11 @@ def draw_humans(npimg, humans, imgcopy=False):
                 body_part = human.body_parts[i]
                 center = (int(body_part.x * image_w + 0.5), int(body_part.y * image_h + 0.5))
                 centers[i] = center
-                cv2.circle(npimg, center, 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)
+                #add x
+                flat[i*2] = center[0]
+                #add y
+                flat[i*2+1] = center[1]
+                cv2.circle(npimg, center, 8, common.CocoColors[i], thickness=3, lineType=8, shift=0)

             # draw line
             for pair_order, pair in enumerate(common.CocoPairsRender):

@@ -402,6 +410,13 @@
                 # npimg = cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)
                 cv2.line(npimg, centers[pair[0]], centers[pair[1]], common.CocoColors[pair_order], 3)

+            dc["people"].append({"pose_keypoints_2d" : flat})
+
+        if output_json_dir:
+            with open(os.path.join(output_json_dir, '{0}_keypoints.json'.format(str(frame).zfill(12))), 'w') as outfile:
+                json.dump(dc, outfile)
+
+
         return npimg

     def _get_scaled_img(self, npimg, scale):
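
For reference, a minimal sketch of reading one of these files back downstream, assuming the default /tmp/ output directory and frame 0 (the path is an example; only the standard library is needed):

import json

# Load the per-frame file written by draw_humans().
with open('/tmp/000000000000_keypoints.json') as f:
    frame_data = json.load(f)

for person in frame_data['people']:
    flat = person['pose_keypoints_2d']             # 36 values: x0, y0, x1, y1, ..., x17, y17
    keypoints = list(zip(flat[0::2], flat[1::2]))  # 18 (x, y) pixel pairs in CocoPart order
    # Joints the estimator did not detect keep their initial (0.0, 0.0) value.
    detected = [(i, xy) for i, xy in enumerate(keypoints) if xy != (0.0, 0.0)]
    print('person with %d detected joints: %s' % (len(detected), detected))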

1 comment on commit eb25b19

@fenaux commented on eb25b19, Mar 12, 2019
Thanks for this commit.
I think the diff for run_webcam.py also applies to run_video.py. Did you already check such a modification?
Thanks for your help.
