support mask prompt for video tracking

This commit is contained in:
rentainhe
2024-08-07 16:42:49 +08:00
parent 7c0995e9c3
commit 37cf27cfe3
2 changed files with 54 additions and 23 deletions

View File

@@ -11,7 +11,7 @@ import cv2
import torch
import numpy as np
import supervision as sv
from supervision.draw.color import ColorPalette
from pathlib import Path
from tqdm import tqdm
from PIL import Image
@@ -29,6 +29,7 @@ OUTPUT_VIDEO_PATH = "./hippopotamus_tracking_demo.mp4"
SOURCE_VIDEO_FRAME_DIR = "./custom_video_frames"
SAVE_TRACKING_RESULTS_DIR = "./tracking_results"
API_TOKEN_FOR_GD1_5 = "Your API token"
PROMPT_TYPE_FOR_VIDEO = "mask" # "point"
"""
Step 1: Environment settings and model initialization for SAM 2
@@ -151,6 +152,10 @@ if masks.ndim == 4:
Step 3: Register each object's positive points to video predictor with separate add_new_points call
"""
assert PROMPT_TYPE_FOR_VIDEO in ["point", "mask"]
# If you are using point prompts, we uniformly sample positive points based on the mask
if PROMPT_TYPE_FOR_VIDEO == "point":
# sample the positive points from mask for each object
all_sample_points = sample_points_from_masks(masks=masks, num_points=10)
@@ -163,6 +168,16 @@ for object_id, (label, points) in enumerate(zip(OBJECTS, all_sample_points), sta
points=points,
labels=labels,
)
# Using mask prompt is a more straightforward way
elif PROMPT_TYPE_FOR_VIDEO == "mask":
for object_id, (label, mask) in enumerate(zip(OBJECTS, masks), start=1):
labels = np.ones((1), dtype=np.int32)
_, out_obj_ids, out_mask_logits = video_predictor.add_new_mask(
inference_state=inference_state,
frame_idx=ann_frame_idx,
obj_id=object_id,
mask=mask
)
"""

View File

@@ -129,6 +129,12 @@ elif masks.ndim == 4:
Step 3: Register each object's positive points to video predictor with separate add_new_points call
"""
PROMPT_TYPE_FOR_VIDEO = "mask" # or "point"
assert PROMPT_TYPE_FOR_VIDEO in ["point", "mask"]
# If you are using point prompts, we uniformly sample positive points based on the mask
if PROMPT_TYPE_FOR_VIDEO == "point":
# sample the positive points from mask for each object
all_sample_points = sample_points_from_masks(masks=masks, num_points=10)
@@ -141,6 +147,16 @@ for object_id, (label, points) in enumerate(zip(OBJECTS, all_sample_points), sta
points=points,
labels=labels,
)
# Using mask prompt is a more straightforward way
elif PROMPT_TYPE_FOR_VIDEO == "mask":
for object_id, (label, mask) in enumerate(zip(OBJECTS, masks), start=1):
labels = np.ones((1), dtype=np.int32)
_, out_obj_ids, out_mask_logits = video_predictor.add_new_mask(
inference_state=inference_state,
frame_idx=ann_frame_idx,
obj_id=object_id,
mask=mask
)
"""