
Commit

video: adding face detection and person detection samples for beta (GoogleCloudPlatform/python-docs-samples#2919)

* video: adding face detection and person detection samples for beta

* updating requirements.txt

* updating test names to faces

* fixing region tag typo

* responding to comments

* reverted tabs to fix linting errors

* responding to comments
czahedi authored Feb 20, 2020
1 parent 8c04d10 commit 51f58b5
Showing 9 changed files with 487 additions and 1 deletion.
2 changes: 1 addition & 1 deletion samples/analyze/requirements.txt
@@ -1,2 +1,2 @@
-google-cloud-videointelligence==1.12.1
+google-cloud-videointelligence==1.13.0
 google-cloud-storage==1.23.0
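
The version bump is what exposes the videointelligence_v1p3beta1 surface the new samples import. A typical way to pick it up locally (assuming a virtualenv in samples/analyze) would be:

    pip install -r requirements.txt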
85 changes: 85 additions & 0 deletions samples/analyze/video_detect_faces_beta.py
@@ -0,0 +1,85 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START video_detect_faces_beta]
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_faces(local_file_path="path/to/your/video-file.mp4"):
    """Detects faces in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.types.VideoContext(
        face_detection_config=config
    )

    # Start the asynchronous request
    operation = client.annotate_video(
        input_content=input_content,
        features=[videointelligence.enums.Feature.FACE_DETECTION],
        video_context=context,
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.nanos / 1e9,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.nanos / 1e9,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, facial hair, smiling,
            # direction of gaze, etc.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )


# [END video_detect_faces_beta]
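
For local experimentation, a minimal entry point could look like the sketch below; the __main__ guard and the file path are illustrative assumptions (the path mirrors the test fixture further down), not part of the committed sample.

    if __name__ == "__main__":
        # Hypothetical driver, not part of this commit; requires
        # Application Default Credentials with the Video Intelligence
        # API enabled.
        detect_faces("resources/googlework_short.mp4")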
30 changes: 30 additions & 0 deletions samples/analyze/video_detect_faces_beta_test.py
@@ -0,0 +1,30 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import video_detect_faces_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_faces(capsys):
    local_file_path = os.path.join(RESOURCES, "googlework_short.mp4")

    video_detect_faces_beta.detect_faces(local_file_path=local_file_path)

    out, _ = capsys.readouterr()

    assert "Face detected:" in out
    assert "Attributes:" in out
81 changes: 81 additions & 0 deletions samples/analyze/video_detect_faces_gcs_beta.py
@@ -0,0 +1,81 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START video_detect_faces_gcs_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects faces in a video."""

    client = videointelligence.VideoIntelligenceServiceClient()

    # Configure the request
    config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    context = videointelligence.types.VideoContext(
        face_detection_config=config
    )

    # Start the asynchronous request
    operation = client.annotate_video(
        input_uri=gcs_uri,
        features=[videointelligence.enums.Feature.FACE_DETECTION],
        video_context=context,
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.nanos / 1e9,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.nanos / 1e9,
                )
            )

            # Each segment includes timestamped faces that include
            # characteristics of the face detected.
            # Grab the first timestamped face
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, facial hair, smiling,
            # direction of gaze, etc.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )


# [END video_detect_faces_gcs_beta]
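
Likewise, a hypothetical invocation sketch for the GCS variant; the URI below is the public sample file the accompanying test uses, assumed here only for illustration.

    if __name__ == "__main__":
        # Hypothetical driver, not part of this commit.
        detect_faces("gs://cloud-samples-data/video/googlework_short.mp4")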
30 changes: 30 additions & 0 deletions samples/analyze/video_detect_faces_gcs_beta_test.py
@@ -0,0 +1,30 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import video_detect_faces_gcs_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_faces(capsys):
    input_uri = "gs://cloud-samples-data/video/googlework_short.mp4"

    video_detect_faces_gcs_beta.detect_faces(gcs_uri=input_uri)

    out, _ = capsys.readouterr()

    assert "Face detected:" in out
    assert "Attributes:" in out
100 changes: 100 additions & 0 deletions samples/analyze/video_detect_person_beta.py
@@ -0,0 +1,100 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START video_detect_person_beta]
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_person(local_file_path="path/to/your/video-file.mp4"):
    """Detects people in a video from a local file."""

    client = videointelligence.VideoIntelligenceServiceClient()

    with io.open(local_file_path, "rb") as f:
        input_content = f.read()

    # Configure the request
    config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    context = videointelligence.types.VideoContext(
        person_detection_config=config
    )

    # Start the asynchronous request
    operation = client.annotate_video(
        input_content=input_content,
        features=[videointelligence.enums.Feature.PERSON_DETECTION],
        video_context=context,
    )

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # Retrieve the first result, because a single video was processed.
    annotation_result = result.annotation_results[0]

    for annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in annotation.tracks:
            print(
                "Segment: {}s to {}s".format(
                    track.segment.start_time_offset.seconds
                    + track.segment.start_time_offset.nanos / 1e9,
                    track.segment.end_time_offset.seconds
                    + track.segment.end_time_offset.nanos / 1e9,
                )
            )

            # Each segment includes timestamped objects that include
            # characteristics of the person detected, e.g. clothes and posture.
            # Grab the first timestamped object
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft  : {}".format(box.left))
            print("\ttop   : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing,
            # poses, or hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )

            # Landmarks in person detection include body parts such as
            # left_shoulder, right_ear, and right_ankle
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print(
                    "\t{}: {} (x={}, y={})".format(
                        landmark.name,
                        landmark.confidence,
                        landmark.point.x,  # Normalized vertex
                        landmark.point.y,  # Normalized vertex
                    )
                )


# [END video_detect_person_beta]
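
As with the face samples, a hypothetical entry point for local runs; the fixture name mirrors the test below and is an assumption, not part of the committed file.

    if __name__ == "__main__":
        # Hypothetical driver, not part of this commit.
        detect_person("resources/googlework_tiny.mp4")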
32 changes: 32 additions & 0 deletions samples/analyze/video_detect_person_beta_test.py
@@ -0,0 +1,32 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import video_detect_person_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_person(capsys):
    local_file_path = os.path.join(RESOURCES, "googlework_tiny.mp4")

    video_detect_person_beta.detect_person(local_file_path=local_file_path)

    out, _ = capsys.readouterr()

    assert "Person detected:" in out
    assert "Attributes:" in out
    assert "x=" in out
    assert "y=" in out
