# fiftyone-dataset-inference
Run ML model inference (YOLO, YOLOv8, CLIP, SAM, Detectron2, etc.) on FiftyOne datasets. Use when running models, applying detection, classification, segmentation, embeddings, or any model prediction task. Also use for end-to-end workflows that include importing data then running inference.
Install:

```
npx skill4agent add voxel51/fiftyone-skills fiftyone-dataset-inference
```

Typical workflow: call `list_datasets()` to discover datasets, `set_context(dataset_name="my-dataset")` to select one, then `launch_app(dataset_name="my-dataset")` to open the App. Inference results are written to label fields such as `predictions`, `detections`, or `embeddings`. Call `close_app()` when finished. Begin with:

```
list_datasets()
set_context(dataset_name="my-dataset")
```
Quickstart:

```
dataset_summary(name="my-dataset")
launch_app(dataset_name="my-dataset")
execute_operator(
    operator_uri="@voxel51/zoo/apply_zoo_model",
    params={
        "tab": "BUILTIN",
        "model": "yolov8n-coco-torch",
        "label_field": "predictions"
    }
)
set_view(exists=["predictions"])
close_app()
```

If a model requires extra dependencies, install them with the listed `install_command` before running inference.

## Detection models

| Model | Description | Extra Deps |
|---|---|---|
<!-- NOTE(review): Model IDs reconstructed from the FiftyOne Model Zoo; the extracted page dropped this column — confirm against the original skill page. -->
| `faster-rcnn-resnet50-fpn-coco-torch` | Faster R-CNN | None |
| `retinanet-resnet50-fpn-coco-torch` | RetinaNet | None |
| `yolov8n-coco-torch` | YOLOv8 nano (fast) | ultralytics |
| `yolov8s-coco-torch` | YOLOv8 small | ultralytics |
| `yolov8m-coco-torch` | YOLOv8 medium | ultralytics |
| `yolov8l-coco-torch` | YOLOv8 large | ultralytics |
| `yolov8x-coco-torch` | YOLOv8 extra-large | ultralytics |
| Model | Description | Extra Deps |
|---|---|---|
<!-- NOTE(review): Model IDs reconstructed from the FiftyOne Model Zoo; confirm against the original skill page. -->
| `resnet50-imagenet-torch` | ResNet-50 | None |
| `mobilenet-v2-imagenet-torch` | MobileNet v2 | None |
| `vit-base-patch16-224` | Vision Transformer | None |
| Model | Description | Extra Deps |
|---|---|---|
<!-- NOTE(review): Model IDs reconstructed from the FiftyOne Model Zoo; confirm against the original skill page. -->
| `segment-anything-vitb-torch` | Segment Anything (base) | segment-anything |
| `segment-anything-vitl-torch` | Segment Anything (large) | segment-anything |
| `segment-anything-vith-torch` | Segment Anything (huge) | segment-anything |
| `deeplabv3-resnet50-coco-torch` | DeepLabV3 | None |
| Model | Description | Extra Deps |
|---|---|---|
<!-- NOTE(review): Model IDs reconstructed from the FiftyOne Model Zoo; confirm against the original skill page. -->
| `clip-vit-base32-torch` | CLIP embeddings | open-clip-torch |
| `dinov2-vits14-torch` | DINOv2 small | None |
| `dinov2-vitb14-torch` | DINOv2 base | None |
| `dinov2-vitl14-torch` | DINOv2 large | None |
# Verify dataset exists
list_datasets()
# Set context and launch
set_context(dataset_name="my-dataset")
launch_app(dataset_name="my-dataset")
# Apply detection model
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "faster-rcnn-resnet50-fpn-coco-torch",
"label_field": "predictions"
}
)
# View results
set_view(exists=["predictions"])
```

## Classification

```
set_context(dataset_name="my-dataset")
launch_app(dataset_name="my-dataset")
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "resnet50-imagenet-torch",
"label_field": "classification"
}
)
set_view(exists=["classification"])
```

## Embeddings

```
set_context(dataset_name="my-dataset")
launch_app(dataset_name="my-dataset")
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "clip-vit-base32-torch",
"label_field": "clip_embeddings"
}
)
```

## Dataset with existing labels

```
set_context(dataset_name="my-dataset")
dataset_summary(name="my-dataset") # Check existing fields
launch_app(dataset_name="my-dataset")
# Run inference with different field name
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "yolov8m-coco-torch",
"label_field": "predictions" # Different from ground_truth
}
)
# View both fields to compare
set_view(exists=["ground_truth", "predictions"])
```

## Multiple models on one dataset

```
set_context(dataset_name="my-dataset")
launch_app(dataset_name="my-dataset")
# Run detection
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "yolov8n-coco-torch",
"label_field": "detections"
}
)
# Run classification
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "resnet50-imagenet-torch",
"label_field": "classification"
}
)
# Run embeddings
execute_operator(
operator_uri="@voxel51/zoo/apply_zoo_model",
params={
"tab": "BUILTIN",
"model": "clip-vit-base32-torch",
"label_field": "embeddings"
}
)
```

## Tips and troubleshooting

- Call `list_datasets()` first to confirm the dataset name exists.
- Call `get_operator_schema("@voxel51/zoo/apply_zoo_model")` to inspect the operator's available parameters.
- If inference fails with a `missing_package` error, run the listed `install_command` (e.g. `pip install <package>`) and retry.
- Model size trades speed for accuracy: `yolov8n` is the fastest, `yolov8x` the most accurate.
- Use descriptive label fields such as `predictions`, `yolo_detections`, or `clip_embeddings`, and check existing fields first with `dataset_summary()` to avoid overwriting labels.