update DINO-X api usage to dds v2

This commit is contained in:
rentainhe
2025-04-20 01:04:26 +08:00
parent 3c5a4136d4
commit d49257700a
3 changed files with 43 additions and 33 deletions

View File

@@ -20,6 +20,7 @@ In this repo, we've supported the following demo with **simple implementations**
Grounded SAM 2 does not introduce significant methodological changes compared to [Grounded SAM: Assembling Open-World Models for Diverse Visual Tasks](https://arxiv.org/abs/2401.14159). Both approaches leverage the capabilities of open-world models to address complex visual tasks. Consequently, we try to **simplify the code implementation** in this repository, aiming to enhance user convenience.
## Latest updates
- **2025.04.20**: Update to `dds-cloudapi-sdk` API V2 version. The V1 version in the original API for `Grounding DINO 1.5` and `DINO-X` has been deprecated, please update to the latest `dds-cloudapi-sdk` by `pip install dds-cloudapi-sdk -U` to use `Grounding DINO 1.5 / 1.6` and `DINO-X` models. Please refer to [dds-cloudapi-sdk](https://github.com/deepdataspace/dds-cloudapi-sdk) and our [API docs](https://cloud.deepdataspace.com/docs) to view more details about the update.
- **2024.12.02**: Support **DINO-X with SAM 2** demos (including object segmentation and tracking), please install the latest version of `dds-cloudapi-sdk==0.3.3` and refer to [Grounded SAM 2 (with DINO-X)](#grounded-sam-2-image-demo-with-dino-x) and [Grounded SAM 2 Video (with DINO-X)](#grounded-sam-2-video-object-tracking-demo-with-custom-video-input-with-dino-x) for more details.

View File

@@ -1,9 +1,7 @@
# dds cloudapi for Grounding DINO 1.5
# dds cloudapi for DINO-X - update to V2Task API
from dds_cloudapi_sdk import Config
from dds_cloudapi_sdk import Client
from dds_cloudapi_sdk.tasks.dinox import DinoxTask
from dds_cloudapi_sdk.tasks.types import DetectionTarget
from dds_cloudapi_sdk import TextPrompt
from dds_cloudapi_sdk.tasks.v2_task import V2Task
import os
import cv2
@@ -30,6 +28,7 @@ SAVE_TRACKING_RESULTS_DIR = "./tracking_results"
API_TOKEN_FOR_DINOX = "Your API token"
PROMPT_TYPE_FOR_VIDEO = "box" # choose from ["point", "box", "mask"]
BOX_THRESHOLD = 0.2
IOU_THRESHOLD = 0.8 # IOU threshold parameter added for the V2 API
"""
Step 1: Environment settings and model initialization for SAM 2
@@ -98,22 +97,29 @@ config = Config(API_TOKEN_FOR_DINOX)
# Step 2: initialize the client
client = Client(config)
# Step 3: run the task by DetectionTask class
# image_url = "https://algosplt.oss-cn-shenzhen.aliyuncs.com/test_files/tasks/detection/iron_man.jpg"
# Step 3: run the task using V2Task class
# if you are processing local image file, upload them to DDS server to get the image url
image_url = client.upload_file(img_path)
task = DinoxTask(
image_url=image_url,
prompts=[TextPrompt(text=TEXT_PROMPT)],
bbox_threshold=0.25,
targets=[DetectionTarget.BBox],
task = V2Task(
api_path="/v2/task/dinox/detection",
api_body={
"model": "DINO-X-1.0",
"image": image_url,
"prompt": {
"type": "text",
"text": TEXT_PROMPT
},
"targets": ["bbox"],
"bbox_threshold": BOX_THRESHOLD,
"iou_threshold": IOU_THRESHOLD,
}
)
client.run_task(task)
result = task.result
objects = result.objects # the list of detected objects
objects = result["objects"] # the list of detected objects
input_boxes = []
@@ -121,9 +127,9 @@ confidences = []
class_names = []
for idx, obj in enumerate(objects):
input_boxes.append(obj.bbox)
confidences.append(obj.score)
class_names.append(obj.category)
input_boxes.append(obj["bbox"])
confidences.append(obj["score"])
class_names.append(obj["category"])
input_boxes = np.array(input_boxes)

View File

@@ -1,10 +1,7 @@
# dds cloudapi for Grounding DINO 1.5
# dds cloudapi for Grounding DINO 1.5 - update to V2Task API
from dds_cloudapi_sdk import Config
from dds_cloudapi_sdk import Client
from dds_cloudapi_sdk import DetectionTask
from dds_cloudapi_sdk import TextPrompt
from dds_cloudapi_sdk import DetectionModel
from dds_cloudapi_sdk import DetectionTarget
from dds_cloudapi_sdk.tasks.v2_task import V2Task
import os
import cv2
@@ -31,6 +28,7 @@ SAVE_TRACKING_RESULTS_DIR = "./tracking_results"
API_TOKEN_FOR_GD1_5 = "Your API token"
PROMPT_TYPE_FOR_VIDEO = "box" # choose from ["point", "box", "mask"]
BOX_THRESHOLD = 0.2
IOU_THRESHOLD = 0.8 # IOU threshold parameter added for the V2 API
"""
Step 1: Environment settings and model initialization for SAM 2
@@ -99,33 +97,38 @@ config = Config(API_TOKEN_FOR_GD1_5)
# Step 2: initialize the client
client = Client(config)
# Step 3: run the task by DetectionTask class
# image_url = "https://algosplt.oss-cn-shenzhen.aliyuncs.com/test_files/tasks/detection/iron_man.jpg"
# Step 3: run the task using V2Task class
# if you are processing local image file, upload them to DDS server to get the image url
image_url = client.upload_file(img_path)
task = DetectionTask(
image_url=image_url,
prompts=[TextPrompt(text=TEXT_PROMPT)],
targets=[DetectionTarget.BBox], # detect bbox
    model=DetectionModel.GDino1_6_Pro, # detect with GroundingDino-1.6-Pro model
bbox_threshold=BOX_THRESHOLD,
task = V2Task(
api_path="/v2/task/grounding_dino/detection",
api_body={
"model": "GroundingDino-1.5-Pro",
"image": image_url,
"prompt": {
"type": "text",
"text": TEXT_PROMPT
},
"targets": ["bbox"],
"bbox_threshold": BOX_THRESHOLD,
"iou_threshold": IOU_THRESHOLD,
}
)
client.run_task(task)
result = task.result
objects = result.objects # the list of detected objects
objects = result["objects"] # the list of detected objects
input_boxes = []
confidences = []
class_names = []
for idx, obj in enumerate(objects):
input_boxes.append(obj.bbox)
confidences.append(obj.score)
class_names.append(obj.category)
input_boxes.append(obj["bbox"])
confidences.append(obj["score"])
class_names.append(obj["category"])
input_boxes = np.array(input_boxes)