init commit
12
ultralytics/models/sam/__init__.py
Normal file
@@ -0,0 +1,12 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from .model import SAM
from .predict import Predictor, SAM2DynamicInteractivePredictor, SAM2Predictor, SAM2VideoPredictor

__all__ = (
    "SAM",
    "Predictor",
    "SAM2Predictor",
    "SAM2VideoPredictor",
    "SAM2DynamicInteractivePredictor",
)  # tuple or list of exportable items
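For context, a minimal usage sketch of the exported interface, assuming the `sam_b.pt` weights can be downloaded and that `image.jpg` is an illustrative local image path:

import ultralytics.models.sam as sam

# The package re-exports the model class and the predictor variants listed in __all__.
print(sam.__all__)

# Load a SAM checkpoint by name and run a prompted prediction with one foreground point.
model = sam.SAM("sam_b.pt")
print(model.is_sam2)  # False for original SAM checkpoints
results = model("image.jpg", points=[[500, 375]], labels=[1])
print(f"Detected {len(results[0].masks)} masks")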
BIN
ultralytics/models/sam/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
ultralytics/models/sam/__pycache__/amg.cpython-310.pyc
Normal file
Binary file not shown.
BIN
ultralytics/models/sam/__pycache__/model.cpython-310.pyc
Normal file
Binary file not shown.
BIN
ultralytics/models/sam/__pycache__/predict.cpython-310.pyc
Normal file
Binary file not shown.
281
ultralytics/models/sam/amg.py
Normal file
@@ -0,0 +1,281 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from __future__ import annotations

import math
from collections.abc import Generator
from itertools import product
from typing import Any

import numpy as np
import torch


def is_box_near_crop_edge(
    boxes: torch.Tensor, crop_box: list[int], orig_box: list[int], atol: float = 20.0
) -> torch.Tensor:
    """
    Determine if bounding boxes are near the edge of a cropped image region using a specified tolerance.

    Args:
        boxes (torch.Tensor): Bounding boxes in XYXY format.
        crop_box (list[int]): Crop box coordinates in [x0, y0, x1, y1] format.
        orig_box (list[int]): Original image box coordinates in [x0, y0, x1, y1] format.
        atol (float, optional): Absolute tolerance for edge proximity detection.

    Returns:
        (torch.Tensor): Boolean tensor indicating which boxes are near crop edges.

    Examples:
        >>> boxes = torch.tensor([[10, 10, 50, 50], [100, 100, 150, 150]])
        >>> crop_box = [0, 0, 200, 200]
        >>> orig_box = [0, 0, 300, 300]
        >>> near_edge = is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0)
    """
    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
    boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
    return torch.any(near_crop_edge, dim=1)


def batch_iterator(batch_size: int, *args) -> Generator[list[Any]]:
    """
    Yield batches of data from input arguments with specified batch size for efficient processing.

    This function takes a batch size and any number of iterables, then yields batches of elements from those
    iterables. All input iterables must have the same length.

    Args:
        batch_size (int): Size of each batch to yield.
        *args (Any): Variable length input iterables to batch. All iterables must have the same length.

    Yields:
        (list[Any]): A list of batched elements from each input iterable.

    Examples:
        >>> data = [1, 2, 3, 4, 5]
        >>> labels = ["a", "b", "c", "d", "e"]
        >>> for batch in batch_iterator(2, data, labels):
        ...     print(batch)
        [[1, 2], ['a', 'b']]
        [[3, 4], ['c', 'd']]
        [[5], ['e']]
    """
    assert args and all(len(a) == len(args[0]) for a in args), "Batched iteration must have same-size inputs."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]


def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:
    """
    Compute the stability score for a batch of masks.

    The stability score is the IoU between binary masks obtained by thresholding the predicted mask logits at
    high and low values.

    Args:
        masks (torch.Tensor): Batch of predicted mask logits.
        mask_threshold (float): Threshold value for creating binary masks.
        threshold_offset (float): Offset applied to the threshold for creating high and low binary masks.

    Returns:
        (torch.Tensor): Stability scores for each mask in the batch.

    Notes:
        - One mask is always contained inside the other.
        - Memory is saved by preventing unnecessary cast to torch.int64.

    Examples:
        >>> masks = torch.rand(10, 256, 256)  # Batch of 10 masks
        >>> mask_threshold = 0.5
        >>> threshold_offset = 0.1
        >>> stability_scores = calculate_stability_score(masks, mask_threshold, threshold_offset)
    """
    intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    return intersections / unions


def build_point_grid(n_per_side: int) -> np.ndarray:
    """Generate a 2D grid of evenly spaced points in the range [0,1]x[0,1] for image segmentation tasks."""
    offset = 1 / (2 * n_per_side)
    points_one_side = np.linspace(offset, 1 - offset, n_per_side)
    points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
    points_y = np.tile(points_one_side[:, None], (1, n_per_side))
    return np.stack([points_x, points_y], axis=-1).reshape(-1, 2)


def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> list[np.ndarray]:
    """Generate point grids for multiple crop layers with varying scales and densities."""
    return [build_point_grid(int(n_per_side / (scale_per_layer**i))) for i in range(n_layers + 1)]


def generate_crop_boxes(
    im_size: tuple[int, ...], n_layers: int, overlap_ratio: float
) -> tuple[list[list[int]], list[int]]:
    """
    Generate crop boxes of varying sizes for multiscale image processing, with layered overlapping regions.

    Args:
        im_size (tuple[int, ...]): Height and width of the input image.
        n_layers (int): Number of layers to generate crop boxes for.
        overlap_ratio (float): Ratio of overlap between adjacent crop boxes.

    Returns:
        crop_boxes (list[list[int]]): List of crop boxes in [x0, y0, x1, y1] format.
        layer_idxs (list[int]): List of layer indices corresponding to each crop box.

    Examples:
        >>> im_size = (800, 1200)  # Height, width
        >>> n_layers = 3
        >>> overlap_ratio = 0.25
        >>> crop_boxes, layer_idxs = generate_crop_boxes(im_size, n_layers, overlap_ratio)
    """
    crop_boxes, layer_idxs = [], []
    im_h, im_w = im_size
    short_side = min(im_h, im_w)

    # Original image
    crop_boxes.append([0, 0, im_w, im_h])
    layer_idxs.append(0)

    def crop_len(orig_len, n_crops, overlap):
        """Calculate the length of each crop given the original length, number of crops, and overlap."""
        return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))

    for i_layer in range(n_layers):
        n_crops_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))

        crop_w = crop_len(im_w, n_crops_per_side, overlap)
        crop_h = crop_len(im_h, n_crops_per_side, overlap)

        crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
        crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]

        # Crops in XYXY format, clipped to the image bounds
        for x0, y0 in product(crop_box_x0, crop_box_y0):
            box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
            crop_boxes.append(box)
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs


def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
    """Uncrop bounding boxes by adding the crop box offset to their coordinates."""
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = offset.unsqueeze(1)
    return boxes + offset


def uncrop_points(points: torch.Tensor, crop_box: list[int]) -> torch.Tensor:
    """Uncrop points by adding the crop box offset to their coordinates."""
    x0, y0, _, _ = crop_box
    offset = torch.tensor([[x0, y0]], device=points.device)
    # Check if points has a channel dimension
    if len(points.shape) == 3:
        offset = offset.unsqueeze(1)
    return points + offset


def uncrop_masks(masks: torch.Tensor, crop_box: list[int], orig_h: int, orig_w: int) -> torch.Tensor:
    """Uncrop masks by padding them to the original image size, handling coordinate transformations."""
    x0, y0, x1, y1 = crop_box
    if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
        return masks
    # Coordinate transform masks
    pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
    pad = (x0, pad_x - x0, y0, pad_y - y0)
    return torch.nn.functional.pad(masks, pad, value=0)


def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> tuple[np.ndarray, bool]:
    """
    Remove small disconnected regions or holes in a mask based on area threshold and mode.

    Args:
        mask (np.ndarray): Binary mask to process.
        area_thresh (float): Area threshold below which regions will be removed.
        mode (str): Processing mode, either 'holes' to fill small holes or 'islands' to remove small disconnected
            regions.

    Returns:
        processed_mask (np.ndarray): Processed binary mask with small regions removed.
        modified (bool): Whether any regions were modified.

    Examples:
        >>> mask = np.zeros((100, 100), dtype=np.bool_)
        >>> mask[40:60, 40:60] = True  # Create a square
        >>> mask[45:55, 45:55] = False  # Create a hole
        >>> processed_mask, modified = remove_small_regions(mask, 50, "holes")
    """
    import cv2  # type: ignore

    assert mode in {"holes", "islands"}, f"Provided mode {mode} is invalid"
    correct_holes = mode == "holes"
    working_mask = (correct_holes ^ mask).astype(np.uint8)
    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
    sizes = stats[:, -1][1:]  # Row 0 is background label
    small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
    if not small_regions:
        return mask, False
    fill_labels = [0] + small_regions
    if not correct_holes:
        # If every region is below threshold, keep largest
        fill_labels = [i for i in range(n_labels) if i not in fill_labels] or [int(np.argmax(sizes)) + 1]
    mask = np.isin(regions, fill_labels)
    return mask, True


def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
    """
    Calculate bounding boxes in XYXY format around binary masks.

    Args:
        masks (torch.Tensor): Binary masks with shape (B, H, W) or (B, C, H, W).

    Returns:
        (torch.Tensor): Bounding boxes in XYXY format with shape (B, 4) or (B, C, 4).

    Notes:
        - Handles empty masks by returning zero boxes.
        - Preserves input tensor dimensions in the output.
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to CxHxW
    shape = masks.shape
    h, w = shape[-2:]
    masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0)
    # Get top and bottom edges
    in_height, _ = torch.max(masks, dim=-1)
    in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    in_height_coords = in_height_coords + h * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + w * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]
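The amg.py helpers are independent of any checkpoint, so they can be exercised directly with synthetic inputs; a minimal sketch, assuming only that the module is importable as ultralytics.models.sam.amg:

import torch

from ultralytics.models.sam.amg import (
    batch_iterator,
    batched_mask_to_box,
    build_point_grid,
    calculate_stability_score,
    generate_crop_boxes,
)

# A 4x4 point grid in normalized [0, 1] coordinates.
grid = build_point_grid(4)
print(grid.shape)  # (16, 2)

# Crop boxes for a 480x640 (H, W) image with one extra crop layer:
# the full image plus a 2x2 grid of overlapping crops.
crop_boxes, layer_idxs = generate_crop_boxes((480, 640), n_layers=1, overlap_ratio=0.25)
print(len(crop_boxes), layer_idxs)  # 5 [0, 1, 1, 1, 1]

# Stability scores for random mask logits thresholded at 0.0 +/- 1.0.
logits = torch.randn(3, 64, 64)
print(calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0))

# Tight XYXY boxes around binary masks; empty masks map to [0, 0, 0, 0].
print(batched_mask_to_box(logits > 0).shape)  # torch.Size([3, 4])

# Batched iteration over two same-length sequences.
for batch in batch_iterator(2, list(range(5)), list("abcde")):
    print(batch)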
358
ultralytics/models/sam/build.py
Normal file
@@ -0,0 +1,358 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from functools import partial

import torch

from ultralytics.utils.downloads import attempt_download_asset

from .modules.decoders import MaskDecoder
from .modules.encoders import FpnNeck, Hiera, ImageEncoder, ImageEncoderViT, MemoryEncoder, PromptEncoder
from .modules.memory_attention import MemoryAttention, MemoryAttentionLayer
from .modules.sam import SAM2Model, SAMModel
from .modules.tiny_encoder import TinyViT
from .modules.transformer import TwoWayTransformer


def build_sam_vit_h(checkpoint=None):
    """Build and return a Segment Anything Model (SAM) h-size model with specified encoder parameters."""
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )


def build_sam_vit_l(checkpoint=None):
    """Build and return a Segment Anything Model (SAM) l-size model with specified encoder parameters."""
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )


def build_sam_vit_b(checkpoint=None):
    """Build and return a Segment Anything Model (SAM) b-size model with specified encoder parameters."""
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )


def build_mobile_sam(checkpoint=None):
    """Build and return a Mobile Segment Anything Model (Mobile-SAM) for efficient image segmentation."""
    return _build_sam(
        encoder_embed_dim=[64, 128, 160, 320],
        encoder_depth=[2, 2, 6, 2],
        encoder_num_heads=[2, 4, 5, 10],
        encoder_global_attn_indexes=None,
        mobile_sam=True,
        checkpoint=checkpoint,
    )


def build_sam2_t(checkpoint=None):
    """Build and return a Segment Anything Model 2 (SAM2) tiny-size model with specified architecture parameters."""
    return _build_sam2(
        encoder_embed_dim=96,
        encoder_stages=[1, 2, 7, 2],
        encoder_num_heads=1,
        encoder_global_att_blocks=[5, 7, 9],
        encoder_window_spec=[8, 4, 14, 7],
        encoder_backbone_channel_list=[768, 384, 192, 96],
        checkpoint=checkpoint,
    )


def build_sam2_s(checkpoint=None):
    """Build and return a small-size Segment Anything Model 2 (SAM2) with specified architecture parameters."""
    return _build_sam2(
        encoder_embed_dim=96,
        encoder_stages=[1, 2, 11, 2],
        encoder_num_heads=1,
        encoder_global_att_blocks=[7, 10, 13],
        encoder_window_spec=[8, 4, 14, 7],
        encoder_backbone_channel_list=[768, 384, 192, 96],
        checkpoint=checkpoint,
    )


def build_sam2_b(checkpoint=None):
    """Build and return a Segment Anything Model 2 (SAM2) base-size model with specified architecture parameters."""
    return _build_sam2(
        encoder_embed_dim=112,
        encoder_stages=[2, 3, 16, 3],
        encoder_num_heads=2,
        encoder_global_att_blocks=[12, 16, 20],
        encoder_window_spec=[8, 4, 14, 7],
        encoder_window_spatial_size=[14, 14],
        encoder_backbone_channel_list=[896, 448, 224, 112],
        checkpoint=checkpoint,
    )


def build_sam2_l(checkpoint=None):
    """Build and return a large-size Segment Anything Model 2 (SAM2) with specified architecture parameters."""
    return _build_sam2(
        encoder_embed_dim=144,
        encoder_stages=[2, 6, 36, 4],
        encoder_num_heads=2,
        encoder_global_att_blocks=[23, 33, 43],
        encoder_window_spec=[8, 4, 16, 8],
        encoder_backbone_channel_list=[1152, 576, 288, 144],
        checkpoint=checkpoint,
    )


def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
    mobile_sam=False,
):
    """
    Build a Segment Anything Model (SAM) with specified encoder parameters.

    Args:
        encoder_embed_dim (int | list[int]): Embedding dimension for the encoder.
        encoder_depth (int | list[int]): Depth of the encoder.
        encoder_num_heads (int | list[int]): Number of attention heads in the encoder.
        encoder_global_attn_indexes (list[int] | None): Indexes for global attention in the encoder.
        checkpoint (str | None, optional): Path to the model checkpoint file.
        mobile_sam (bool, optional): Whether to build a Mobile-SAM model.

    Returns:
        (SAMModel): A Segment Anything Model instance with the specified architecture.

    Examples:
        >>> sam = _build_sam(768, 12, 12, [2, 5, 8, 11])
        >>> sam = _build_sam([64, 128, 160, 320], [2, 2, 6, 2], [2, 4, 5, 10], None, mobile_sam=True)
    """
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size
    image_encoder = (
        TinyViT(
            img_size=1024,
            in_chans=3,
            num_classes=1000,
            embed_dims=encoder_embed_dim,
            depths=encoder_depth,
            num_heads=encoder_num_heads,
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.0,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8,
        )
        if mobile_sam
        else ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        )
    )
    sam = SAMModel(
        image_encoder=image_encoder,
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    if checkpoint is not None:
        checkpoint = attempt_download_asset(checkpoint)
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict)
    sam.eval()
    return sam


def _build_sam2(
    encoder_embed_dim=1280,
    encoder_stages=[2, 6, 36, 4],
    encoder_num_heads=2,
    encoder_global_att_blocks=[7, 15, 23, 31],
    encoder_backbone_channel_list=[1152, 576, 288, 144],
    encoder_window_spatial_size=[7, 7],
    encoder_window_spec=[8, 4, 16, 8],
    checkpoint=None,
):
    """
    Build and return a Segment Anything Model 2 (SAM2) with specified architecture parameters.

    Args:
        encoder_embed_dim (int, optional): Embedding dimension for the encoder.
        encoder_stages (list[int], optional): Number of blocks in each stage of the encoder.
        encoder_num_heads (int, optional): Number of attention heads in the encoder.
        encoder_global_att_blocks (list[int], optional): Indices of global attention blocks in the encoder.
        encoder_backbone_channel_list (list[int], optional): Channel dimensions for each level of the encoder backbone.
        encoder_window_spatial_size (list[int], optional): Spatial size of the window for position embeddings.
        encoder_window_spec (list[int], optional): Window specifications for each stage of the encoder.
        checkpoint (str | None, optional): Path to the checkpoint file for loading pre-trained weights.

    Returns:
        (SAM2Model): A configured and initialized SAM2 model.

    Examples:
        >>> sam2_model = _build_sam2(encoder_embed_dim=96, encoder_stages=[1, 2, 7, 2])
        >>> sam2_model.eval()
    """
    image_encoder = ImageEncoder(
        trunk=Hiera(
            embed_dim=encoder_embed_dim,
            num_heads=encoder_num_heads,
            stages=encoder_stages,
            global_att_blocks=encoder_global_att_blocks,
            window_pos_embed_bkg_spatial_size=encoder_window_spatial_size,
            window_spec=encoder_window_spec,
        ),
        neck=FpnNeck(
            d_model=256,
            backbone_channel_list=encoder_backbone_channel_list,
            fpn_top_down_levels=[2, 3],
            fpn_interp_model="nearest",
        ),
        scalp=1,
    )
    memory_attention = MemoryAttention(d_model=256, pos_enc_at_input=True, num_layers=4, layer=MemoryAttentionLayer())
    memory_encoder = MemoryEncoder(out_dim=64)

    is_sam2_1 = checkpoint is not None and "sam2.1" in checkpoint
    sam2 = SAM2Model(
        image_encoder=image_encoder,
        memory_attention=memory_attention,
        memory_encoder=memory_encoder,
        num_maskmem=7,
        image_size=1024,
        sigmoid_scale_for_mem_enc=20.0,
        sigmoid_bias_for_mem_enc=-10.0,
        use_mask_input_as_output_without_sam=True,
        directly_add_no_mem_embed=True,
        use_high_res_features_in_sam=True,
        multimask_output_in_sam=True,
        iou_prediction_use_sigmoid=True,
        use_obj_ptrs_in_encoder=True,
        add_tpos_enc_to_obj_ptrs=True,
        only_obj_ptrs_in_the_past_for_eval=True,
        pred_obj_scores=True,
        pred_obj_scores_mlp=True,
        fixed_no_obj_ptr=True,
        multimask_output_for_tracking=True,
        use_multimask_token_for_obj_ptr=True,
        multimask_min_pt_num=0,
        multimask_max_pt_num=1,
        use_mlp_for_obj_ptr_proj=True,
        compile_image_encoder=False,
        no_obj_embed_spatial=is_sam2_1,
        proj_tpos_enc_in_obj_ptrs=is_sam2_1,
        use_signed_tpos_enc_to_obj_ptrs=is_sam2_1,
        sam_mask_decoder_extra_args=dict(
            dynamic_multimask_via_stability=True,
            dynamic_multimask_stability_delta=0.05,
            dynamic_multimask_stability_thresh=0.98,
        ),
    )

    if checkpoint is not None:
        checkpoint = attempt_download_asset(checkpoint)
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)["model"]
        sam2.load_state_dict(state_dict)
    sam2.eval()
    return sam2


sam_model_map = {
    "sam_h.pt": build_sam_vit_h,
    "sam_l.pt": build_sam_vit_l,
    "sam_b.pt": build_sam_vit_b,
    "mobile_sam.pt": build_mobile_sam,
    "sam2_t.pt": build_sam2_t,
    "sam2_s.pt": build_sam2_s,
    "sam2_b.pt": build_sam2_b,
    "sam2_l.pt": build_sam2_l,
    "sam2.1_t.pt": build_sam2_t,
    "sam2.1_s.pt": build_sam2_s,
    "sam2.1_b.pt": build_sam2_b,
    "sam2.1_l.pt": build_sam2_l,
}


def build_sam(ckpt="sam_b.pt"):
    """
    Build and return a Segment Anything Model (SAM) based on the provided checkpoint.

    Args:
        ckpt (str | Path, optional): Path to the checkpoint file or name of a pre-defined SAM model.

    Returns:
        (SAMModel | SAM2Model): A configured and initialized SAM or SAM2 model instance.

    Raises:
        FileNotFoundError: If the provided checkpoint is not a supported SAM model.

    Examples:
        >>> sam_model = build_sam("sam_b.pt")
        >>> sam_model = build_sam("path/to/custom_checkpoint.pt")

    Notes:
        Supported pre-defined models include:
        - SAM: 'sam_h.pt', 'sam_l.pt', 'sam_b.pt', 'mobile_sam.pt'
        - SAM2: 'sam2_t.pt', 'sam2_s.pt', 'sam2_b.pt', 'sam2_l.pt'
    """
    model_builder = None
    ckpt = str(ckpt)  # to allow Path ckpt types
    for k in sam_model_map.keys():
        if ckpt.endswith(k):
            model_builder = sam_model_map.get(k)

    if not model_builder:
        raise FileNotFoundError(f"{ckpt} is not a supported SAM model. Available models are: \n {sam_model_map.keys()}")

    return model_builder(ckpt)
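Because build_sam matches the checkpoint name by suffix, any path ending in one of the keys of sam_model_map resolves to the corresponding builder; a minimal sketch, assuming the named checkpoint can be downloaded on first use:

from ultralytics.models.sam.build import build_sam, sam_model_map

# Supported checkpoint names; matching is done with str.endswith, so full paths work too.
print(sorted(sam_model_map))

# Builds a Mobile-SAM model and loads the downloaded weights in eval mode.
model = build_sam("mobile_sam.pt")
print(type(model).__name__)  # SAMModel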
172
ultralytics/models/sam/model.py
Normal file
@@ -0,0 +1,172 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
"""
SAM model interface.

This module provides an interface to the Segment Anything Model (SAM) from ultralytics, designed for real-time image
segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis,
and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new
image distributions and tasks without prior knowledge.

Key Features:
    - Promptable segmentation
    - Real-time performance
    - Zero-shot transfer capabilities
    - Trained on SA-1B dataset
"""

from __future__ import annotations

from pathlib import Path

from ultralytics.engine.model import Model
from ultralytics.utils.torch_utils import model_info

from .predict import Predictor, SAM2Predictor


class SAM(Model):
    """
    SAM (Segment Anything Model) interface class for real-time image segmentation tasks.

    This class provides an interface to the Segment Anything Model (SAM) from ultralytics, designed for
    promptable segmentation with versatility in image analysis. It supports various prompts such as bounding
    boxes, points, or labels, and features zero-shot performance capabilities.

    Attributes:
        model (torch.nn.Module): The loaded SAM model.
        is_sam2 (bool): Indicates whether the model is a SAM2 variant.
        task (str): The task type, set to "segment" for SAM models.

    Methods:
        predict: Perform segmentation prediction on the given image or video source.
        info: Log information about the SAM model.

    Examples:
        >>> sam = SAM("sam_b.pt")
        >>> results = sam.predict("image.jpg", points=[[500, 375]])
        >>> for r in results:
        ...     print(f"Detected {len(r.masks)} masks")
    """

    def __init__(self, model: str = "sam_b.pt") -> None:
        """
        Initialize the SAM (Segment Anything Model) instance.

        Args:
            model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension.

        Raises:
            NotImplementedError: If the model file extension is not .pt or .pth.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> print(sam.is_sam2)
        """
        if model and Path(model).suffix not in {".pt", ".pth"}:
            raise NotImplementedError("SAM prediction requires pre-trained *.pt or *.pth model.")
        self.is_sam2 = "sam2" in Path(model).stem
        super().__init__(model=model, task="segment")

    def _load(self, weights: str, task=None):
        """
        Load the specified weights into the SAM model.

        Args:
            weights (str): Path to the weights file. Should be a .pt or .pth file containing the model parameters.
            task (str | None): Task name. If provided, it specifies the particular task the model is being loaded for.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> sam._load("path/to/custom_weights.pt")
        """
        from .build import build_sam  # slow import

        self.model = build_sam(weights)

    def predict(self, source, stream: bool = False, bboxes=None, points=None, labels=None, **kwargs):
        """
        Perform segmentation prediction on the given image or video source.

        Args:
            source (str | PIL.Image | np.ndarray): Path to the image or video file, or a PIL.Image object, or
                a np.ndarray object.
            stream (bool): If True, enables real-time streaming.
            bboxes (list[list[float]] | None): List of bounding box coordinates for prompted segmentation.
            points (list[list[float]] | None): List of points for prompted segmentation.
            labels (list[int] | None): List of labels for prompted segmentation.
            **kwargs (Any): Additional keyword arguments for prediction.

        Returns:
            (list): The model predictions.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> results = sam.predict("image.jpg", points=[[500, 375]])
            >>> for r in results:
            ...     print(f"Detected {len(r.masks)} masks")
        """
        overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024)
        kwargs = {**overrides, **kwargs}
        prompts = dict(bboxes=bboxes, points=points, labels=labels)
        return super().predict(source, stream, prompts=prompts, **kwargs)

    def __call__(self, source=None, stream: bool = False, bboxes=None, points=None, labels=None, **kwargs):
        """
        Perform segmentation prediction on the given image or video source.

        This method is an alias for the 'predict' method, providing a convenient way to call the SAM model
        for segmentation tasks.

        Args:
            source (str | PIL.Image | np.ndarray | None): Path to the image or video file, or a PIL.Image
                object, or a np.ndarray object.
            stream (bool): If True, enables real-time streaming.
            bboxes (list[list[float]] | None): List of bounding box coordinates for prompted segmentation.
            points (list[list[float]] | None): List of points for prompted segmentation.
            labels (list[int] | None): List of labels for prompted segmentation.
            **kwargs (Any): Additional keyword arguments to be passed to the predict method.

        Returns:
            (list): The model predictions, typically containing segmentation masks and other relevant information.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> results = sam("image.jpg", points=[[500, 375]])
            >>> print(f"Detected {len(results[0].masks)} masks")
        """
        return self.predict(source, stream, bboxes, points, labels, **kwargs)

    def info(self, detailed: bool = False, verbose: bool = True):
        """
        Log information about the SAM model.

        Args:
            detailed (bool): If True, displays detailed information about the model layers and operations.
            verbose (bool): If True, prints the information to the console.

        Returns:
            (tuple): A tuple containing the model's information (string representations of the model).

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> info = sam.info()
            >>> print(info[0])  # Print summary information
        """
        return model_info(self.model, detailed=detailed, verbose=verbose)

    @property
    def task_map(self) -> dict[str, dict[str, type[Predictor]]]:
        """
        Provide a mapping from the 'segment' task to its corresponding 'Predictor'.

        Returns:
            (dict[str, dict[str, type[Predictor]]]): A dictionary mapping the 'segment' task to its corresponding
                Predictor class. For SAM2 models, it maps to SAM2Predictor, otherwise to the standard Predictor.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> task_map = sam.task_map
            >>> print(task_map)
            {'segment': {'predictor': <class 'ultralytics.models.sam.predict.Predictor'>}}
        """
        return {"segment": {"predictor": SAM2Predictor if self.is_sam2 else Predictor}}
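A minimal sketch of the prompt routing described above, assuming a downloadable sam2_b.pt checkpoint and an illustrative image.jpg path; SAM2 checkpoints route to SAM2Predictor via task_map, while plain SAM checkpoints use Predictor:

from ultralytics.models.sam import SAM

model = SAM("sam2_b.pt")
model.info()

# Box prompt and point prompt on the same (illustrative) image.
results = model.predict("image.jpg", bboxes=[[100, 100, 400, 400]])
results = model.predict("image.jpg", points=[[250, 250]], labels=[1])
for r in results:
    print(f"Detected {len(r.masks)} masks")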
1
ultralytics/models/sam/modules/__init__.py
Normal file
@@ -0,0 +1 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
1128
ultralytics/models/sam/modules/blocks.py
Normal file
File diff suppressed because it is too large
513
ultralytics/models/sam/modules/decoders.py
Normal file
@@ -0,0 +1,513 @@
|
||||
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
|
||||
from ultralytics.nn.modules import MLP, LayerNorm2d
|
||||
|
||||
|
||||
class MaskDecoder(nn.Module):
|
||||
"""
|
||||
Decoder module for generating masks and their associated quality scores using a transformer architecture.
|
||||
|
||||
This class predicts masks given image and prompt embeddings, utilizing a transformer to process the inputs and
|
||||
generate mask predictions along with their quality scores.
|
||||
|
||||
Attributes:
|
||||
transformer_dim (int): Channel dimension for the transformer module.
|
||||
transformer (nn.Module): Transformer module used for mask prediction.
|
||||
num_multimask_outputs (int): Number of masks to predict for disambiguating masks.
|
||||
iou_token (nn.Embedding): Embedding for the IoU token.
|
||||
num_mask_tokens (int): Number of mask tokens.
|
||||
mask_tokens (nn.Embedding): Embedding for the mask tokens.
|
||||
output_upscaling (nn.Sequential): Neural network sequence for upscaling the output.
|
||||
output_hypernetworks_mlps (nn.ModuleList): Hypernetwork MLPs for generating masks.
|
||||
iou_prediction_head (nn.Module): MLP for predicting mask quality.
|
||||
|
||||
Methods:
|
||||
forward: Predict masks given image and prompt embeddings.
|
||||
predict_masks: Internal method for mask prediction.
|
||||
|
||||
Examples:
|
||||
>>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer_module)
|
||||
>>> masks, iou_pred = decoder(
|
||||
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, multimask_output=True
|
||||
... )
|
||||
>>> print(f"Predicted masks shape: {masks.shape}, IoU predictions shape: {iou_pred.shape}")
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
transformer_dim: int,
|
||||
transformer: nn.Module,
|
||||
num_multimask_outputs: int = 3,
|
||||
activation: type[nn.Module] = nn.GELU,
|
||||
iou_head_depth: int = 3,
|
||||
iou_head_hidden_dim: int = 256,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the MaskDecoder module for generating masks and their associated quality scores.
|
||||
|
||||
Args:
|
||||
transformer_dim (int): Channel dimension for the transformer module.
|
||||
transformer (nn.Module): Transformer module used for mask prediction.
|
||||
num_multimask_outputs (int): Number of masks to predict for disambiguating masks.
|
||||
activation (Type[nn.Module]): Type of activation to use when upscaling masks.
|
||||
iou_head_depth (int): Depth of the MLP used to predict mask quality.
|
||||
iou_head_hidden_dim (int): Hidden dimension of the MLP used to predict mask quality.
|
||||
|
||||
Examples:
|
||||
>>> transformer = nn.TransformerEncoder(nn.TransformerEncoderLayer(d_model=256, nhead=8), num_layers=6)
|
||||
>>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer)
|
||||
>>> print(decoder)
|
||||
"""
|
||||
super().__init__()
|
||||
self.transformer_dim = transformer_dim
|
||||
self.transformer = transformer
|
||||
|
||||
self.num_multimask_outputs = num_multimask_outputs
|
||||
|
||||
self.iou_token = nn.Embedding(1, transformer_dim)
|
||||
self.num_mask_tokens = num_multimask_outputs + 1
|
||||
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
|
||||
|
||||
self.output_upscaling = nn.Sequential(
|
||||
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
|
||||
LayerNorm2d(transformer_dim // 4),
|
||||
activation(),
|
||||
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
|
||||
activation(),
|
||||
)
|
||||
self.output_hypernetworks_mlps = nn.ModuleList(
|
||||
[MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]
|
||||
)
|
||||
|
||||
self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
multimask_output: bool,
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Predict masks given image and prompt embeddings.
|
||||
|
||||
Args:
|
||||
image_embeddings (torch.Tensor): Embeddings from the image encoder.
|
||||
image_pe (torch.Tensor): Positional encoding with the shape of image_embeddings.
|
||||
sparse_prompt_embeddings (torch.Tensor): Embeddings of the points and boxes.
|
||||
dense_prompt_embeddings (torch.Tensor): Embeddings of the mask inputs.
|
||||
multimask_output (bool): Whether to return multiple masks or a single mask.
|
||||
|
||||
Returns:
|
||||
masks (torch.Tensor): Batched predicted masks.
|
||||
iou_pred (torch.Tensor): Batched predictions of mask quality.
|
||||
|
||||
Examples:
|
||||
>>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer_module)
|
||||
>>> image_emb = torch.rand(1, 256, 64, 64)
|
||||
>>> image_pe = torch.rand(1, 256, 64, 64)
|
||||
>>> sparse_emb = torch.rand(1, 2, 256)
|
||||
>>> dense_emb = torch.rand(1, 256, 64, 64)
|
||||
>>> masks, iou_pred = decoder(image_emb, image_pe, sparse_emb, dense_emb, multimask_output=True)
|
||||
>>> print(f"Masks shape: {masks.shape}, IoU predictions shape: {iou_pred.shape}")
|
||||
"""
|
||||
masks, iou_pred = self.predict_masks(
|
||||
image_embeddings=image_embeddings,
|
||||
image_pe=image_pe,
|
||||
sparse_prompt_embeddings=sparse_prompt_embeddings,
|
||||
dense_prompt_embeddings=dense_prompt_embeddings,
|
||||
)
|
||||
|
||||
# Select the correct mask or masks for output
|
||||
mask_slice = slice(1, None) if multimask_output else slice(0, 1)
|
||||
masks = masks[:, mask_slice, :, :]
|
||||
iou_pred = iou_pred[:, mask_slice]
|
||||
|
||||
return masks, iou_pred
|
||||
|
||||
def predict_masks(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""Predict masks and quality scores using image and prompt embeddings via transformer architecture."""
|
||||
# Concatenate output tokens
|
||||
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
|
||||
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
|
||||
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
|
||||
|
||||
# Expand per-image data in batch direction to be per-mask
|
||||
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
|
||||
src = src + dense_prompt_embeddings
|
||||
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
|
||||
b, c, h, w = src.shape
|
||||
|
||||
# Run the transformer
|
||||
hs, src = self.transformer(src, pos_src, tokens)
|
||||
iou_token_out = hs[:, 0, :]
|
||||
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
|
||||
|
||||
# Upscale mask embeddings and predict masks using the mask tokens
|
||||
src = src.transpose(1, 2).view(b, c, h, w)
|
||||
upscaled_embedding = self.output_upscaling(src)
|
||||
hyper_in_list: list[torch.Tensor] = [
|
||||
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
|
||||
]
|
||||
hyper_in = torch.stack(hyper_in_list, dim=1)
|
||||
b, c, h, w = upscaled_embedding.shape
|
||||
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
|
||||
|
||||
# Generate mask quality predictions
|
||||
iou_pred = self.iou_prediction_head(iou_token_out)
|
||||
|
||||
return masks, iou_pred
|
||||
|
||||
|
||||
class SAM2MaskDecoder(nn.Module):
|
||||
"""
|
||||
Transformer-based decoder for predicting instance segmentation masks from image and prompt embeddings.
|
||||
|
||||
This class extends the functionality of the MaskDecoder, incorporating additional features such as
|
||||
high-resolution feature processing, dynamic multimask output, and object score prediction.
|
||||
|
||||
Attributes:
|
||||
transformer_dim (int): Channel dimension of the transformer.
|
||||
transformer (nn.Module): Transformer used to predict masks.
|
||||
num_multimask_outputs (int): Number of masks to predict when disambiguating masks.
|
||||
iou_token (nn.Embedding): Embedding for IOU token.
|
||||
num_mask_tokens (int): Total number of mask tokens.
|
||||
mask_tokens (nn.Embedding): Embedding for mask tokens.
|
||||
pred_obj_scores (bool): Whether to predict object scores.
|
||||
obj_score_token (nn.Embedding): Embedding for object score token.
|
||||
use_multimask_token_for_obj_ptr (bool): Whether to use multimask token for object pointer.
|
||||
output_upscaling (nn.Sequential): Upscaling layers for output.
|
||||
use_high_res_features (bool): Whether to use high-resolution features.
|
||||
conv_s0 (nn.Conv2d): Convolutional layer for high-resolution features (s0).
|
||||
conv_s1 (nn.Conv2d): Convolutional layer for high-resolution features (s1).
|
||||
output_hypernetworks_mlps (nn.ModuleList): List of MLPs for output hypernetworks.
|
||||
iou_prediction_head (MLP): MLP for IOU prediction.
|
||||
pred_obj_score_head (nn.Linear | MLP): Linear layer or MLP for object score prediction.
|
||||
dynamic_multimask_via_stability (bool): Whether to use dynamic multimask via stability.
|
||||
dynamic_multimask_stability_delta (float): Delta value for dynamic multimask stability.
|
||||
dynamic_multimask_stability_thresh (float): Threshold for dynamic multimask stability.
|
||||
|
||||
Methods:
|
||||
forward: Predict masks given image and prompt embeddings.
|
||||
predict_masks: Predict instance segmentation masks from image and prompt embeddings.
|
||||
_get_stability_scores: Compute mask stability scores based on IoU between thresholds.
|
||||
_dynamic_multimask_via_stability: Dynamically select the most stable mask output.
|
||||
|
||||
Examples:
|
||||
>>> image_embeddings = torch.rand(1, 256, 64, 64)
|
||||
>>> image_pe = torch.rand(1, 256, 64, 64)
|
||||
>>> sparse_prompt_embeddings = torch.rand(1, 2, 256)
|
||||
>>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
|
||||
>>> decoder = SAM2MaskDecoder(256, transformer)
|
||||
>>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
|
||||
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
|
||||
... )
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
transformer_dim: int,
|
||||
transformer: nn.Module,
|
||||
num_multimask_outputs: int = 3,
|
||||
activation: type[nn.Module] = nn.GELU,
|
||||
iou_head_depth: int = 3,
|
||||
iou_head_hidden_dim: int = 256,
|
||||
use_high_res_features: bool = False,
|
||||
iou_prediction_use_sigmoid=False,
|
||||
dynamic_multimask_via_stability=False,
|
||||
dynamic_multimask_stability_delta=0.05,
|
||||
dynamic_multimask_stability_thresh=0.98,
|
||||
pred_obj_scores: bool = False,
|
||||
pred_obj_scores_mlp: bool = False,
|
||||
use_multimask_token_for_obj_ptr: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the SAM2MaskDecoder module for predicting instance segmentation masks.
|
||||
|
||||
This decoder extends the functionality of MaskDecoder, incorporating additional features such as
|
||||
high-resolution feature processing, dynamic multimask output, and object score prediction.
|
||||
|
||||
Args:
|
||||
transformer_dim (int): Channel dimension of the transformer.
|
||||
transformer (nn.Module): Transformer used to predict masks.
|
||||
num_multimask_outputs (int): Number of masks to predict when disambiguating masks.
|
||||
activation (Type[nn.Module]): Type of activation to use when upscaling masks.
|
||||
iou_head_depth (int): Depth of the MLP used to predict mask quality.
|
||||
iou_head_hidden_dim (int): Hidden dimension of the MLP used to predict mask quality.
|
||||
use_high_res_features (bool): Whether to use high-resolution features.
|
||||
iou_prediction_use_sigmoid (bool): Whether to use sigmoid for IOU prediction.
|
||||
dynamic_multimask_via_stability (bool): Whether to use dynamic multimask via stability.
|
||||
dynamic_multimask_stability_delta (float): Delta value for dynamic multimask stability.
|
||||
dynamic_multimask_stability_thresh (float): Threshold for dynamic multimask stability.
|
||||
pred_obj_scores (bool): Whether to predict object scores.
|
||||
pred_obj_scores_mlp (bool): Whether to use MLP for object score prediction.
|
||||
use_multimask_token_for_obj_ptr (bool): Whether to use multimask token for object pointer.
|
||||
|
||||
Examples:
|
||||
>>> transformer = nn.TransformerEncoder(nn.TransformerEncoderLayer(d_model=256, nhead=8), num_layers=6)
|
||||
>>> decoder = SAM2MaskDecoder(transformer_dim=256, transformer=transformer)
|
||||
>>> print(decoder)
|
||||
"""
|
||||
super().__init__()
|
||||
self.transformer_dim = transformer_dim
|
||||
self.transformer = transformer
|
||||
|
||||
self.num_multimask_outputs = num_multimask_outputs
|
||||
|
||||
self.iou_token = nn.Embedding(1, transformer_dim)
|
||||
self.num_mask_tokens = num_multimask_outputs + 1
|
||||
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
|
||||
|
||||
self.pred_obj_scores = pred_obj_scores
|
||||
if self.pred_obj_scores:
|
||||
self.obj_score_token = nn.Embedding(1, transformer_dim)
|
||||
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
|
||||
|
||||
self.output_upscaling = nn.Sequential(
|
||||
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
|
||||
LayerNorm2d(transformer_dim // 4),
|
||||
activation(),
|
||||
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
|
||||
activation(),
|
||||
)
|
||||
self.use_high_res_features = use_high_res_features
|
||||
if use_high_res_features:
|
||||
self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1)
|
||||
self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1)
|
||||
|
||||
self.output_hypernetworks_mlps = nn.ModuleList(
|
||||
[MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for _ in range(self.num_mask_tokens)]
|
||||
)
|
||||
|
||||
self.iou_prediction_head = MLP(
|
||||
transformer_dim,
|
||||
iou_head_hidden_dim,
|
||||
self.num_mask_tokens,
|
||||
iou_head_depth,
|
||||
sigmoid=iou_prediction_use_sigmoid,
|
||||
)
|
||||
if self.pred_obj_scores:
|
||||
self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
|
||||
if pred_obj_scores_mlp:
|
||||
self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
|
||||
|
||||
# When outputting a single mask, optionally we can dynamically fall back to the best
|
||||
# multimask output token if the single mask output token gives low stability scores.
|
||||
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
|
||||
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
|
||||
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
|
||||
|
||||
def forward(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
multimask_output: bool,
|
||||
repeat_image: bool,
|
||||
high_res_features: list[torch.Tensor] | None = None,
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Predict masks given image and prompt embeddings.
|
||||
|
||||
Args:
|
||||
image_embeddings (torch.Tensor): Embeddings from the image encoder with shape (B, C, H, W).
|
||||
image_pe (torch.Tensor): Positional encoding with the shape of image_embeddings (B, C, H, W).
|
||||
sparse_prompt_embeddings (torch.Tensor): Embeddings of the points and boxes with shape (B, N, C).
|
||||
dense_prompt_embeddings (torch.Tensor): Embeddings of the mask inputs with shape (B, C, H, W).
|
||||
multimask_output (bool): Whether to return multiple masks or a single mask.
|
||||
repeat_image (bool): Flag to repeat the image embeddings.
|
||||
high_res_features (list[torch.Tensor] | None, optional): Optional high-resolution features.
|
||||
|
||||
Returns:
|
||||
masks (torch.Tensor): Batched predicted masks with shape (B, N, H, W).
|
||||
iou_pred (torch.Tensor): Batched predictions of mask quality with shape (B, N).
|
||||
sam_tokens_out (torch.Tensor): Batched SAM token for mask output with shape (B, N, C).
|
||||
object_score_logits (torch.Tensor): Batched object score logits with shape (B, 1).
|
||||
|
||||
Examples:
|
||||
>>> image_embeddings = torch.rand(1, 256, 64, 64)
|
||||
>>> image_pe = torch.rand(1, 256, 64, 64)
|
||||
>>> sparse_prompt_embeddings = torch.rand(1, 2, 256)
|
||||
>>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
|
||||
>>> decoder = SAM2MaskDecoder(256, transformer)
|
||||
>>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
|
||||
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
|
||||
... )
|
||||
"""
|
||||
masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
|
||||
image_embeddings=image_embeddings,
|
||||
image_pe=image_pe,
|
||||
sparse_prompt_embeddings=sparse_prompt_embeddings,
|
||||
dense_prompt_embeddings=dense_prompt_embeddings,
|
||||
repeat_image=repeat_image,
|
||||
high_res_features=high_res_features,
|
||||
)
|
||||
|
||||
# Select the correct mask or masks for output
|
||||
if multimask_output:
|
||||
masks = masks[:, 1:, :, :]
|
||||
iou_pred = iou_pred[:, 1:]
|
||||
elif self.dynamic_multimask_via_stability and not self.training:
|
||||
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
|
||||
else:
|
||||
masks = masks[:, 0:1, :, :]
|
||||
iou_pred = iou_pred[:, 0:1]
|
||||
|
||||
if multimask_output and self.use_multimask_token_for_obj_ptr:
|
||||
sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
|
||||
else:
|
||||
# Take the mask output token. Here we *always* use the token for single mask output.
|
||||
# At test time, even if we track after 1-click (and using multimask_output=True),
|
||||
# we still take the single mask token here. The rationale is that we always track
|
||||
# after multiple clicks during training, so the past tokens seen during training
|
||||
# are always the single mask token (and we'll let it be the object-memory token).
|
||||
sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
|
||||
|
||||
return masks, iou_pred, sam_tokens_out, object_score_logits
|
||||
|
||||
def predict_masks(
|
||||
self,
|
||||
image_embeddings: torch.Tensor,
|
||||
image_pe: torch.Tensor,
|
||||
sparse_prompt_embeddings: torch.Tensor,
|
||||
dense_prompt_embeddings: torch.Tensor,
|
||||
repeat_image: bool,
|
||||
high_res_features: list[torch.Tensor] | None = None,
|
||||
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
|
||||
"""Predict instance segmentation masks from image and prompt embeddings using a transformer."""
|
||||
# Concatenate output tokens
|
||||
s = 0
|
||||
if self.pred_obj_scores:
|
||||
output_tokens = torch.cat(
|
||||
[
|
||||
self.obj_score_token.weight,
|
||||
self.iou_token.weight,
|
||||
self.mask_tokens.weight,
|
||||
],
|
||||
dim=0,
|
||||
)
|
||||
s = 1
|
||||
else:
|
||||
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
|
||||
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
|
||||
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
|
||||
|
||||
# Expand per-image data in batch direction to be per-mask
|
||||
if repeat_image:
|
||||
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
|
||||
else:
|
||||
assert image_embeddings.shape[0] == tokens.shape[0]
|
||||
src = image_embeddings
|
||||
src = src + dense_prompt_embeddings
|
||||
assert image_pe.shape[0] == 1, "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
|
||||
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
|
||||
b, c, h, w = src.shape
|
||||
|
||||
# Run the transformer
|
||||
hs, src = self.transformer(src, pos_src, tokens)
|
||||
iou_token_out = hs[:, s, :]
|
||||
mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
|
||||
|
||||
# Upscale mask embeddings and predict masks using the mask tokens
|
||||
src = src.transpose(1, 2).view(b, c, h, w)
|
||||
if not self.use_high_res_features or high_res_features is None:
|
||||
upscaled_embedding = self.output_upscaling(src)
|
||||
else:
|
||||
dc1, ln1, act1, dc2, act2 = self.output_upscaling
|
||||
feat_s0, feat_s1 = high_res_features
|
||||
upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
|
||||
upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
|
||||
|
||||
hyper_in_list: list[torch.Tensor] = [
|
||||
self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
|
||||
]
|
||||
hyper_in = torch.stack(hyper_in_list, dim=1)
|
||||
b, c, h, w = upscaled_embedding.shape
|
||||
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
|
||||
|
||||
# Generate mask quality predictions
|
||||
iou_pred = self.iou_prediction_head(iou_token_out)
|
||||
if self.pred_obj_scores:
|
||||
assert s == 1
|
||||
object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
|
||||
else:
|
||||
# Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
|
||||
object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
|
||||
|
||||
return masks, iou_pred, mask_tokens_out, object_score_logits
|
||||
|
||||
def _get_stability_scores(self, mask_logits):
|
||||
"""Compute mask stability scores based on IoU between upper and lower thresholds."""
|
||||
mask_logits = mask_logits.flatten(-2)
|
||||
stability_delta = self.dynamic_multimask_stability_delta
|
||||
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
|
||||
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
|
||||
return torch.where(area_u > 0, area_i / area_u, 1.0)
|
||||
|
||||
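# Editor's note: a hedged numeric sketch of the stability score above. With a hypothetical
# dynamic_multimask_stability_delta of 0.05 and flattened mask logits [6.0, 4.0, 0.02, -5.0]:
#   area_i = 2 (values > +0.05), area_u = 3 (values > -0.05), stability = 2 / 3 ≈ 0.67,
# i.e. pixels sitting near the decision boundary shrink the ratio and flag the mask as unstable.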
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
|
||||
"""
|
||||
Dynamically select the most stable mask output based on stability scores and IoU predictions.
|
||||
|
||||
This method is used when outputting a single mask. If the stability score from the current single-mask
|
||||
output (based on output token 0) falls below a threshold, it instead selects from multi-mask outputs
|
||||
(based on output tokens 1-3) the mask with the highest predicted IoU score. This ensures a valid mask
|
||||
for both clicking and tracking scenarios.
|
||||
|
||||
Args:
|
||||
all_mask_logits (torch.Tensor): Logits for all predicted masks, shape (B, N, H, W) where B is
|
||||
batch size, N is number of masks (typically 4), and H, W are mask dimensions.
|
||||
all_iou_scores (torch.Tensor): Predicted IoU scores for all masks, shape (B, N).
|
||||
|
||||
Returns:
|
||||
mask_logits_out (torch.Tensor): Selected mask logits, shape (B, 1, H, W).
|
||||
iou_scores_out (torch.Tensor): Selected IoU scores, shape (B, 1).
|
||||
|
||||
Examples:
|
||||
>>> decoder = SAM2MaskDecoder(...)
|
||||
>>> all_mask_logits = torch.rand(2, 4, 256, 256) # 2 images, 4 masks each
|
||||
>>> all_iou_scores = torch.rand(2, 4)
|
||||
>>> mask_logits, iou_scores = decoder._dynamic_multimask_via_stability(all_mask_logits, all_iou_scores)
|
||||
>>> print(mask_logits.shape, iou_scores.shape)
|
||||
torch.Size([2, 1, 256, 256]) torch.Size([2, 1])
|
||||
"""
|
||||
# The best mask from multimask output tokens (1~3)
|
||||
multimask_logits = all_mask_logits[:, 1:, :, :]
|
||||
multimask_iou_scores = all_iou_scores[:, 1:]
|
||||
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
|
||||
batch_inds = torch.arange(multimask_iou_scores.shape[0], device=all_iou_scores.device)
|
||||
best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
|
||||
best_multimask_logits = best_multimask_logits.unsqueeze(1)
|
||||
best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
|
||||
best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
|
||||
|
||||
# The mask from singlemask output token 0 and its stability score
|
||||
singlemask_logits = all_mask_logits[:, 0:1, :, :]
|
||||
singlemask_iou_scores = all_iou_scores[:, 0:1]
|
||||
stability_scores = self._get_stability_scores(singlemask_logits)
|
||||
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
|
||||
|
||||
# Dynamically fall back to best multimask output upon low stability scores.
|
||||
mask_logits_out = torch.where(
|
||||
is_stable[..., None, None].expand_as(singlemask_logits),
|
||||
singlemask_logits,
|
||||
best_multimask_logits,
|
||||
)
|
||||
iou_scores_out = torch.where(
|
||||
is_stable.expand_as(singlemask_iou_scores),
|
||||
singlemask_iou_scores,
|
||||
best_multimask_iou_scores,
|
||||
)
|
||||
return mask_logits_out, iou_scores_out
|
||||
851
ultralytics/models/sam/modules/encoders.py
Normal file
851
ultralytics/models/sam/modules/encoders.py
Normal file
@@ -0,0 +1,851 @@
|
||||
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from ultralytics.nn.modules import LayerNorm2d
|
||||
|
||||
from .blocks import (
|
||||
Block,
|
||||
CXBlock,
|
||||
Fuser,
|
||||
MaskDownSampler,
|
||||
MultiScaleBlock,
|
||||
PatchEmbed,
|
||||
PositionEmbeddingRandom,
|
||||
PositionEmbeddingSine,
|
||||
)
|
||||
|
||||
|
||||
class ImageEncoderViT(nn.Module):
|
||||
"""
|
||||
An image encoder using Vision Transformer (ViT) architecture for encoding images into a compact latent space.
|
||||
|
||||
This class processes images by splitting them into patches, applying transformer blocks, and generating a final
|
||||
encoded representation through a neck module.
|
||||
|
||||
Attributes:
|
||||
img_size (int): Dimension of input images, assumed to be square.
|
||||
patch_embed (PatchEmbed): Module for patch embedding.
|
||||
pos_embed (nn.Parameter | None): Absolute positional embedding for patches.
|
||||
blocks (nn.ModuleList): List of transformer blocks for processing patch embeddings.
|
||||
neck (nn.Sequential): Neck module to further process the output.
|
||||
|
||||
Methods:
|
||||
forward: Process input through patch embedding, positional embedding, blocks, and neck.
|
||||
|
||||
Examples:
|
||||
>>> import torch
|
||||
>>> encoder = ImageEncoderViT(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
|
||||
>>> input_image = torch.randn(1, 3, 224, 224)
|
||||
>>> output = encoder(input_image)
|
||||
>>> print(output.shape)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
img_size: int = 1024,
|
||||
patch_size: int = 16,
|
||||
in_chans: int = 3,
|
||||
embed_dim: int = 768,
|
||||
depth: int = 12,
|
||||
num_heads: int = 12,
|
||||
mlp_ratio: float = 4.0,
|
||||
out_chans: int = 256,
|
||||
qkv_bias: bool = True,
|
||||
norm_layer: type[nn.Module] = nn.LayerNorm,
|
||||
act_layer: type[nn.Module] = nn.GELU,
|
||||
use_abs_pos: bool = True,
|
||||
use_rel_pos: bool = False,
|
||||
rel_pos_zero_init: bool = True,
|
||||
window_size: int = 0,
|
||||
global_attn_indexes: tuple[int, ...] = (),
|
||||
) -> None:
|
||||
"""
|
||||
Initialize an ImageEncoderViT instance for encoding images using Vision Transformer architecture.
|
||||
|
||||
Args:
|
||||
img_size (int): Input image size, assumed to be square.
|
||||
patch_size (int): Size of image patches.
|
||||
in_chans (int): Number of input image channels.
|
||||
embed_dim (int): Dimension of patch embeddings.
|
||||
depth (int): Number of transformer blocks.
|
||||
num_heads (int): Number of attention heads in each block.
|
||||
mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
|
||||
out_chans (int): Number of output channels from the neck module.
|
||||
qkv_bias (bool): If True, adds learnable bias to query, key, value projections.
|
||||
norm_layer (Type[nn.Module]): Type of normalization layer to use.
|
||||
act_layer (Type[nn.Module]): Type of activation layer to use.
|
||||
use_abs_pos (bool): If True, uses absolute positional embeddings.
|
||||
use_rel_pos (bool): If True, adds relative positional embeddings to attention maps.
|
||||
rel_pos_zero_init (bool): If True, initializes relative positional parameters to zero.
|
||||
window_size (int): Size of attention window for windowed attention blocks.
|
||||
global_attn_indexes (tuple[int, ...]): Indices of blocks that use global attention.
|
||||
|
||||
Examples:
|
||||
>>> encoder = ImageEncoderViT(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
|
||||
>>> input_image = torch.randn(1, 3, 224, 224)
|
||||
>>> output = encoder(input_image)
|
||||
>>> print(output.shape)
|
||||
"""
|
||||
super().__init__()
|
||||
self.img_size = img_size
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
kernel_size=(patch_size, patch_size),
|
||||
stride=(patch_size, patch_size),
|
||||
in_chans=in_chans,
|
||||
embed_dim=embed_dim,
|
||||
)
|
||||
|
||||
self.pos_embed: nn.Parameter | None = None
|
||||
if use_abs_pos:
|
||||
# Initialize absolute positional embedding with pretrain image size
|
||||
self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim))
|
||||
|
||||
self.blocks = nn.ModuleList()
|
||||
for i in range(depth):
|
||||
block = Block(
|
||||
dim=embed_dim,
|
||||
num_heads=num_heads,
|
||||
mlp_ratio=mlp_ratio,
|
||||
qkv_bias=qkv_bias,
|
||||
norm_layer=norm_layer,
|
||||
act_layer=act_layer,
|
||||
use_rel_pos=use_rel_pos,
|
||||
rel_pos_zero_init=rel_pos_zero_init,
|
||||
window_size=window_size if i not in global_attn_indexes else 0,
|
||||
input_size=(img_size // patch_size, img_size // patch_size),
|
||||
)
|
||||
self.blocks.append(block)
|
||||
|
||||
self.neck = nn.Sequential(
|
||||
nn.Conv2d(
|
||||
embed_dim,
|
||||
out_chans,
|
||||
kernel_size=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(out_chans),
|
||||
nn.Conv2d(
|
||||
out_chans,
|
||||
out_chans,
|
||||
kernel_size=3,
|
||||
padding=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(out_chans),
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Process input through patch embedding, positional embedding, transformer blocks, and neck module."""
|
||||
x = self.patch_embed(x)
|
||||
if self.pos_embed is not None:
|
||||
pos_embed = (
|
||||
F.interpolate(self.pos_embed.permute(0, 3, 1, 2), scale_factor=self.img_size / 1024).permute(0, 2, 3, 1)
|
||||
if self.img_size != 1024
|
||||
else self.pos_embed
|
||||
)
|
||||
x = x + pos_embed
|
||||
for blk in self.blocks:
|
||||
x = blk(x)
|
||||
return self.neck(x.permute(0, 3, 1, 2))
|
||||
|
||||
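# Editor's note: a shape sketch for the defaults above (img_size=1024, patch_size=16, embed_dim=768,
# out_chans=256), offered as an illustration rather than a guarantee:
#   x: (1, 3, 1024, 1024) -> patch_embed -> (1, 64, 64, 768) -> transformer blocks (shape preserved)
#   -> permute to (1, 768, 64, 64) -> neck -> (1, 256, 64, 64)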
|
||||
class PromptEncoder(nn.Module):
|
||||
"""
|
||||
Encode different types of prompts for input to SAM's mask decoder, producing sparse and dense embeddings.
|
||||
|
||||
Attributes:
|
||||
embed_dim (int): Dimension of the embeddings.
|
||||
input_image_size (tuple[int, int]): Size of the input image as (H, W).
|
||||
image_embedding_size (tuple[int, int]): Spatial size of the image embedding as (H, W).
|
||||
pe_layer (PositionEmbeddingRandom): Module for random position embedding.
|
||||
num_point_embeddings (int): Number of point embeddings for different types of points.
|
||||
point_embeddings (nn.ModuleList): List of point embeddings.
|
||||
not_a_point_embed (nn.Embedding): Embedding for points that are not part of any label.
|
||||
mask_input_size (tuple[int, int]): Size of the input mask.
|
||||
mask_downscaling (nn.Sequential): Neural network for downscaling the mask.
|
||||
no_mask_embed (nn.Embedding): Embedding for cases where no mask is provided.
|
||||
|
||||
Methods:
|
||||
get_dense_pe: Return the positional encoding used to encode point prompts.
|
||||
forward: Embed different types of prompts, returning both sparse and dense embeddings.
|
||||
|
||||
Examples:
|
||||
>>> prompt_encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
|
||||
>>> points = (torch.rand(1, 5, 2), torch.randint(0, 4, (1, 5)))
|
||||
>>> boxes = torch.rand(1, 2, 2)
|
||||
>>> masks = torch.rand(1, 1, 256, 256)
|
||||
>>> sparse_embeddings, dense_embeddings = prompt_encoder(points, boxes, masks)
|
||||
>>> print(sparse_embeddings.shape, dense_embeddings.shape)
|
||||
torch.Size([1, 7, 256]) torch.Size([1, 256, 64, 64])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim: int,
|
||||
image_embedding_size: tuple[int, int],
|
||||
input_image_size: tuple[int, int],
|
||||
mask_in_chans: int,
|
||||
activation: type[nn.Module] = nn.GELU,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize the PromptEncoder module for encoding various types of prompts.
|
||||
|
||||
Args:
|
||||
embed_dim (int): The dimension of the embeddings.
|
||||
image_embedding_size (tuple[int, int]): The spatial size of the image embedding as (H, W).
|
||||
input_image_size (tuple[int, int]): The padded size of the input image as (H, W).
|
||||
mask_in_chans (int): The number of hidden channels used for encoding input masks.
|
||||
activation (Type[nn.Module]): The activation function to use when encoding input masks.
|
||||
|
||||
Examples:
|
||||
>>> prompt_encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
|
||||
>>> points = (torch.rand(1, 5, 2), torch.randint(0, 4, (1, 5)))
|
||||
>>> boxes = torch.rand(1, 2, 2)
|
||||
>>> masks = torch.rand(1, 1, 256, 256)
|
||||
>>> sparse_embeddings, dense_embeddings = prompt_encoder(points, boxes, masks)
|
||||
>>> print(sparse_embeddings.shape, dense_embeddings.shape)
|
||||
torch.Size([1, 7, 256]) torch.Size([1, 256, 64, 64])
|
||||
"""
|
||||
super().__init__()
|
||||
self.embed_dim = embed_dim
|
||||
self.input_image_size = input_image_size
|
||||
self.image_embedding_size = image_embedding_size
|
||||
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
|
||||
|
||||
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
|
||||
point_embeddings = [nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)]
|
||||
self.point_embeddings = nn.ModuleList(point_embeddings)
|
||||
self.not_a_point_embed = nn.Embedding(1, embed_dim)
|
||||
|
||||
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
|
||||
self.mask_downscaling = nn.Sequential(
|
||||
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
|
||||
LayerNorm2d(mask_in_chans // 4),
|
||||
activation(),
|
||||
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
|
||||
LayerNorm2d(mask_in_chans),
|
||||
activation(),
|
||||
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
|
||||
)
|
||||
self.no_mask_embed = nn.Embedding(1, embed_dim)
|
||||
|
||||
def get_dense_pe(self) -> torch.Tensor:
|
||||
"""
|
||||
Return the dense positional encoding used for encoding point prompts.
|
||||
|
||||
Generate a positional encoding for a dense set of points matching the shape of the image
|
||||
encoding. The encoding is used to provide spatial information to the model when processing point prompts.
|
||||
|
||||
Returns:
|
||||
(torch.Tensor): Positional encoding tensor with shape (1, embed_dim, H, W), where H and W are the
|
||||
height and width of the image embedding size, respectively.
|
||||
|
||||
Examples:
|
||||
>>> prompt_encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
|
||||
>>> dense_pe = prompt_encoder.get_dense_pe()
|
||||
>>> print(dense_pe.shape)
|
||||
torch.Size([1, 256, 64, 64])
|
||||
"""
|
||||
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
|
||||
|
||||
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor:
|
||||
"""Embed point prompts by applying positional encoding and label-specific embeddings."""
|
||||
points = points + 0.5 # Shift to center of pixel
|
||||
if pad:
|
||||
padding_point = torch.zeros((points.shape[0], 1, 2), dtype=points.dtype, device=points.device)
|
||||
padding_label = -torch.ones((labels.shape[0], 1), dtype=labels.dtype, device=labels.device)
|
||||
points = torch.cat([points, padding_point], dim=1)
|
||||
labels = torch.cat([labels, padding_label], dim=1)
|
||||
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
|
||||
point_embedding[labels == -1] = 0.0
|
||||
point_embedding[labels == -1] += self.not_a_point_embed.weight
|
||||
point_embedding[labels == 0] += self.point_embeddings[0].weight
|
||||
point_embedding[labels == 1] += self.point_embeddings[1].weight
|
||||
point_embedding[labels == 2] += self.point_embeddings[2].weight
|
||||
point_embedding[labels == 3] += self.point_embeddings[3].weight
|
||||
return point_embedding
|
||||
|
||||
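# Editor's note: the label convention used above, inferred from the embedding indices and the
# "pos/neg point + 2 box corners" comment in __init__ (an informal summary, not additional API):
#   -1 -> padding / not-a-point, 0 -> negative click, 1 -> positive click,
#    2 -> box top-left corner,   3 -> box bottom-right corner (see _embed_boxes below).
# Hypothetical usage sketch for a single positive click on a PromptEncoder instance `encoder`:
#   coords = torch.tensor([[[128.0, 96.0]]])  # (1, 1, 2) click at x=128, y=96
#   labels = torch.tensor([[1]])              # 1 = foreground point
#   emb = encoder._embed_points(coords, labels, pad=True)  # -> (1, 2, embed_dim) after padding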
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
|
||||
"""Embed box prompts by applying positional encoding and adding corner embeddings."""
|
||||
boxes = boxes + 0.5 # Shift to center of pixel
|
||||
coords = boxes.reshape(-1, 2, 2)
|
||||
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
|
||||
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
|
||||
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
|
||||
return corner_embedding
|
||||
|
||||
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
|
||||
"""Embed mask inputs by downscaling and processing through convolutional layers."""
|
||||
return self.mask_downscaling(masks)
|
||||
|
||||
@staticmethod
|
||||
def _get_batch_size(
|
||||
points: tuple[torch.Tensor, torch.Tensor] | None,
|
||||
boxes: torch.Tensor | None,
|
||||
masks: torch.Tensor | None,
|
||||
) -> int:
|
||||
"""Get the batch size of the output given the batch size of the input prompts."""
|
||||
if points is not None:
|
||||
return points[0].shape[0]
|
||||
elif boxes is not None:
|
||||
return boxes.shape[0]
|
||||
elif masks is not None:
|
||||
return masks.shape[0]
|
||||
else:
|
||||
return 1
|
||||
|
||||
def forward(
|
||||
self,
|
||||
points: tuple[torch.Tensor, torch.Tensor] | None,
|
||||
boxes: torch.Tensor | None,
|
||||
masks: torch.Tensor | None,
|
||||
) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Embed different types of prompts, returning both sparse and dense embeddings.
|
||||
|
||||
Args:
|
||||
points (tuple[torch.Tensor, torch.Tensor] | None): Point coordinates and labels to embed. The first
|
||||
tensor contains coordinates with shape (B, N, 2), and the second tensor contains labels with
|
||||
shape (B, N).
|
||||
boxes (torch.Tensor | None): Boxes to embed with shape (B, M, 2, 2), where M is the number of boxes.
|
||||
masks (torch.Tensor | None): Masks to embed with shape (B, 1, H, W).
|
||||
|
||||
Returns:
|
||||
sparse_embeddings (torch.Tensor): Sparse embeddings for points and boxes with shape (B, N, embed_dim).
|
||||
dense_embeddings (torch.Tensor): Dense embeddings for masks of shape (B, embed_dim, embed_H, embed_W).
|
||||
|
||||
Examples:
|
||||
>>> encoder = PromptEncoder(256, (64, 64), (1024, 1024), 16)
|
||||
>>> points = (torch.rand(1, 5, 2), torch.randint(0, 4, (1, 5)))
|
||||
>>> boxes = torch.rand(1, 1, 2, 2)
|
||||
>>> masks = torch.rand(1, 1, 256, 256)
|
||||
>>> sparse_emb, dense_emb = encoder(points, boxes, masks)
|
||||
>>> print(sparse_emb.shape, dense_emb.shape)
|
||||
torch.Size([1, 7, 256]) torch.Size([1, 256, 64, 64])
|
||||
"""
|
||||
bs = self._get_batch_size(points, boxes, masks)
|
||||
sparse_embeddings = torch.empty(
|
||||
(bs, 0, self.embed_dim),
|
||||
dtype=self.point_embeddings[0].weight.dtype,
|
||||
device=self.point_embeddings[0].weight.device,
|
||||
)
|
||||
if points is not None:
|
||||
coords, labels = points
|
||||
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
|
||||
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
|
||||
if boxes is not None:
|
||||
box_embeddings = self._embed_boxes(boxes)
|
||||
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
|
||||
|
||||
if masks is not None:
|
||||
dense_embeddings = self._embed_masks(masks)
|
||||
else:
|
||||
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
|
||||
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
|
||||
)
|
||||
|
||||
return sparse_embeddings, dense_embeddings
|
||||
|
||||
|
||||
class MemoryEncoder(nn.Module):
|
||||
"""
|
||||
Encode pixel features and masks into a memory representation for efficient image segmentation.
|
||||
|
||||
This class processes pixel-level features and masks, fusing them to generate encoded memory representations
|
||||
suitable for downstream tasks in image segmentation models like SAM (Segment Anything Model).
|
||||
|
||||
Attributes:
|
||||
mask_downsampler (MaskDownSampler): Module for downsampling input masks.
|
||||
pix_feat_proj (nn.Conv2d): Convolutional layer for projecting pixel features.
|
||||
fuser (Fuser): Module for fusing pixel features and masks.
|
||||
position_encoding (PositionEmbeddingSine): Module for adding positional encoding to features.
|
||||
out_proj (nn.Module): Output projection layer, either nn.Identity or nn.Conv2d.
|
||||
|
||||
Methods:
|
||||
forward: Process input pixel features and masks to generate encoded memory representations.
|
||||
|
||||
Examples:
|
||||
>>> import torch
|
||||
>>> encoder = MemoryEncoder(out_dim=256, in_dim=256)
|
||||
>>> pix_feat = torch.randn(1, 256, 64, 64)
|
||||
>>> masks = torch.randn(1, 1, 64, 64)
|
||||
>>> encoded_feat, pos = encoder(pix_feat, masks)
|
||||
>>> print(encoded_feat.shape, pos.shape)
|
||||
torch.Size([1, 256, 64, 64]) torch.Size([1, 128, 64, 64])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
out_dim,
|
||||
in_dim=256, # in_dim of pix_feats
|
||||
):
|
||||
"""
|
||||
Initialize the MemoryEncoder for encoding pixel features and masks into memory representations.
|
||||
|
||||
This encoder processes pixel-level features and masks, fusing them to generate encoded memory representations
|
||||
suitable for downstream tasks in image segmentation models like SAM (Segment Anything Model).
|
||||
|
||||
Args:
|
||||
out_dim (int): Output dimension of the encoded features.
|
||||
in_dim (int): Input dimension of the pixel features.
|
||||
|
||||
Examples:
|
||||
>>> encoder = MemoryEncoder(out_dim=256, in_dim=256)
|
||||
>>> pix_feat = torch.randn(1, 256, 64, 64)
|
||||
>>> masks = torch.randn(1, 1, 64, 64)
|
||||
>>> encoded_feat, pos = encoder(pix_feat, masks)
|
||||
>>> print(encoded_feat.shape, pos.shape)
|
||||
torch.Size([1, 256, 64, 64]) torch.Size([1, 128, 64, 64])
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.mask_downsampler = MaskDownSampler(kernel_size=3, stride=2, padding=1)
|
||||
|
||||
self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
|
||||
self.fuser = Fuser(CXBlock(dim=256), num_layers=2)
|
||||
self.position_encoding = PositionEmbeddingSine(num_pos_feats=64)
|
||||
self.out_proj = nn.Identity()
|
||||
if out_dim != in_dim:
|
||||
self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
pix_feat: torch.Tensor,
|
||||
masks: torch.Tensor,
|
||||
skip_mask_sigmoid: bool = False,
|
||||
) -> dict:
|
||||
"""Process pixel features and masks to generate encoded memory representations for segmentation."""
|
||||
if not skip_mask_sigmoid:
|
||||
masks = F.sigmoid(masks)
|
||||
masks = self.mask_downsampler(masks)
|
||||
|
||||
# Fuse pix_feats and downsampled masks; if the visual features are on a different device (e.g. CPU), move them to the masks' device first
|
||||
pix_feat = pix_feat.to(masks.device)
|
||||
|
||||
x = self.pix_feat_proj(pix_feat)
|
||||
x = x + masks
|
||||
x = self.fuser(x)
|
||||
x = self.out_proj(x)
|
||||
|
||||
pos = self.position_encoding(x).to(x.dtype)
|
||||
|
||||
return {"vision_features": x, "vision_pos_enc": [pos]}
|
||||
|
||||
|
||||
class ImageEncoder(nn.Module):
|
||||
"""
|
||||
Encode images using a trunk-neck architecture, producing multiscale features and positional encodings.
|
||||
|
||||
This class combines a trunk network for feature extraction with a neck network for feature refinement
|
||||
and positional encoding generation. It can optionally discard the lowest resolution features.
|
||||
|
||||
Attributes:
|
||||
trunk (nn.Module): The trunk network for initial feature extraction.
|
||||
neck (nn.Module): The neck network for feature refinement and positional encoding generation.
|
||||
scalp (int): Number of lowest resolution feature levels to discard.
|
||||
|
||||
Methods:
|
||||
forward: Process the input image through the trunk and neck networks.
|
||||
|
||||
Examples:
|
||||
>>> trunk = SomeTrunkNetwork()
|
||||
>>> neck = SomeNeckNetwork()
|
||||
>>> encoder = ImageEncoder(trunk, neck, scalp=1)
|
||||
>>> image = torch.randn(1, 3, 224, 224)
|
||||
>>> output = encoder(image)
|
||||
>>> print(output.keys())
|
||||
dict_keys(['vision_features', 'vision_pos_enc', 'backbone_fpn'])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
trunk: nn.Module,
|
||||
neck: nn.Module,
|
||||
scalp: int = 0,
|
||||
):
|
||||
"""
|
||||
Initialize the ImageEncoder with trunk and neck networks for feature extraction and refinement.
|
||||
|
||||
This encoder combines a trunk network for feature extraction with a neck network for feature refinement
|
||||
and positional encoding generation. It can optionally discard the lowest resolution features.
|
||||
|
||||
Args:
|
||||
trunk (nn.Module): The trunk network for initial feature extraction.
|
||||
neck (nn.Module): The neck network for feature refinement and positional encoding generation.
|
||||
scalp (int): Number of lowest resolution feature levels to discard.
|
||||
|
||||
Examples:
|
||||
>>> trunk = SomeTrunkNetwork()
|
||||
>>> neck = SomeNeckNetwork()
|
||||
>>> encoder = ImageEncoder(trunk, neck, scalp=1)
|
||||
>>> image = torch.randn(1, 3, 224, 224)
|
||||
>>> output = encoder(image)
|
||||
>>> print(output.keys())
|
||||
dict_keys(['vision_features', 'vision_pos_enc', 'backbone_fpn'])
|
||||
"""
|
||||
super().__init__()
|
||||
self.trunk = trunk
|
||||
self.neck = neck
|
||||
self.scalp = scalp
|
||||
assert self.trunk.channel_list == self.neck.backbone_channel_list, (
|
||||
f"Channel dims of trunk {self.trunk.channel_list} and neck {self.neck.backbone_channel_list} do not match."
|
||||
)
|
||||
|
||||
def forward(self, sample: torch.Tensor):
|
||||
"""Encode input through trunk and neck networks, returning multiscale features and positional encodings."""
|
||||
features, pos = self.neck(self.trunk(sample))
|
||||
if self.scalp > 0:
|
||||
# Discard the lowest resolution features
|
||||
features, pos = features[: -self.scalp], pos[: -self.scalp]
|
||||
|
||||
src = features[-1]
|
||||
return {
|
||||
"vision_features": src,
|
||||
"vision_pos_enc": pos,
|
||||
"backbone_fpn": features,
|
||||
}
|
||||
|
||||
|
||||
class FpnNeck(nn.Module):
|
||||
"""
|
||||
A Feature Pyramid Network (FPN) neck variant for multiscale feature fusion in object detection models.
|
||||
|
||||
This FPN variant removes the output convolution and resizes features with interpolation (bilinear by default, configurable via fpn_interp_model),
|
||||
similar to ViT positional embedding interpolation.
|
||||
|
||||
Attributes:
|
||||
position_encoding (PositionEmbeddingSine): Sinusoidal positional encoding module.
|
||||
convs (nn.ModuleList): List of convolutional layers for each backbone level.
|
||||
backbone_channel_list (list[int]): List of channel dimensions from the backbone.
|
||||
fpn_interp_model (str): Interpolation mode for FPN feature resizing.
|
||||
fuse_type (str): Type of feature fusion, either 'sum' or 'avg'.
|
||||
fpn_top_down_levels (list[int]): Levels to have top-down features in outputs.
|
||||
|
||||
Methods:
|
||||
forward: Perform forward pass through the FPN neck.
|
||||
|
||||
Examples:
|
||||
>>> backbone_channels = [512, 256, 128, 64]  # coarse-to-fine, matching the conv order
|
||||
>>> fpn_neck = FpnNeck(256, backbone_channels)
|
||||
>>> inputs = [torch.rand(1, c, s, s) for c, s in zip(backbone_channels[::-1], (64, 32, 16, 8))]
|
||||
>>> outputs, positions = fpn_neck(inputs)
|
||||
>>> print(len(outputs), len(positions))
|
||||
4 4
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
d_model: int,
|
||||
backbone_channel_list: list[int],
|
||||
kernel_size: int = 1,
|
||||
stride: int = 1,
|
||||
padding: int = 0,
|
||||
fpn_interp_model: str = "bilinear",
|
||||
fuse_type: str = "sum",
|
||||
fpn_top_down_levels: list[int] | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize a modified Feature Pyramid Network (FPN) neck.
|
||||
|
||||
This FPN variant removes the output convolution and resizes features with interpolation (bilinear by default, configurable via fpn_interp_model),
|
||||
similar to ViT positional embedding interpolation.
|
||||
|
||||
Args:
|
||||
d_model (int): Dimension of the model.
|
||||
backbone_channel_list (list[int]): List of channel dimensions from the backbone.
|
||||
kernel_size (int): Kernel size for the convolutional layers.
|
||||
stride (int): Stride for the convolutional layers.
|
||||
padding (int): Padding for the convolutional layers.
|
||||
fpn_interp_model (str): Interpolation mode for FPN feature resizing.
|
||||
fuse_type (str): Type of feature fusion, either 'sum' or 'avg'.
|
||||
fpn_top_down_levels (Optional[list[int]]): Levels to have top-down features in outputs.
|
||||
|
||||
Examples:
|
||||
>>> backbone_channels = [64, 128, 256, 512]
|
||||
>>> fpn_neck = FpnNeck(256, backbone_channels)
|
||||
>>> print(fpn_neck)
|
||||
"""
|
||||
super().__init__()
|
||||
self.position_encoding = PositionEmbeddingSine(num_pos_feats=256)
|
||||
self.convs = nn.ModuleList()
|
||||
self.backbone_channel_list = backbone_channel_list
|
||||
for dim in backbone_channel_list:
|
||||
current = nn.Sequential()
|
||||
current.add_module(
|
||||
"conv",
|
||||
nn.Conv2d(
|
||||
in_channels=dim,
|
||||
out_channels=d_model,
|
||||
kernel_size=kernel_size,
|
||||
stride=stride,
|
||||
padding=padding,
|
||||
),
|
||||
)
|
||||
|
||||
self.convs.append(current)
|
||||
self.fpn_interp_model = fpn_interp_model
|
||||
assert fuse_type in {"sum", "avg"}
|
||||
self.fuse_type = fuse_type
|
||||
|
||||
# Levels to have top-down features in their outputs
|
||||
# e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
|
||||
# have top-down propagation, while outputs of level 0 and level 1 have only
|
||||
# lateral features from the same backbone level
|
||||
if fpn_top_down_levels is None:
|
||||
# Default is to have top-down features on all levels
|
||||
fpn_top_down_levels = range(len(self.convs))
|
||||
self.fpn_top_down_levels = list(fpn_top_down_levels)
|
||||
|
||||
def forward(self, xs: list[torch.Tensor]):
|
||||
"""
|
||||
Perform forward pass through the Feature Pyramid Network (FPN) neck.
|
||||
|
||||
This method processes a list of input tensors from the backbone through the FPN, applying lateral connections
|
||||
and top-down feature fusion. It generates output feature maps and corresponding positional encodings.
|
||||
|
||||
Args:
|
||||
xs (list[torch.Tensor]): List of input tensors from the backbone, each with shape (B, C, H, W).
|
||||
|
||||
Returns:
|
||||
out (list[torch.Tensor]): List of output feature maps after FPN processing, each with shape
|
||||
(B, d_model, H, W).
|
||||
pos (list[torch.Tensor]): List of positional encodings corresponding to each output feature map.
|
||||
|
||||
Examples:
|
||||
>>> fpn_neck = FpnNeck(d_model=256, backbone_channel_list=[512, 256, 128, 64])
|
||||
>>> inputs = [torch.rand(1, c, s, s) for c, s in zip((64, 128, 256, 512), (64, 32, 16, 8))]
|
||||
>>> outputs, positions = fpn_neck(inputs)
|
||||
>>> print(len(outputs), len(positions))
|
||||
4 4
|
||||
"""
|
||||
out = [None] * len(self.convs)
|
||||
pos = [None] * len(self.convs)
|
||||
assert len(xs) == len(self.convs)
|
||||
# FPN forward pass
|
||||
# see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
|
||||
prev_features = None
|
||||
# Forward in top-down order (from low to high resolution)
|
||||
n = len(self.convs) - 1
|
||||
for i in range(n, -1, -1):
|
||||
x = xs[i]
|
||||
lateral_features = self.convs[n - i](x)
|
||||
if i in self.fpn_top_down_levels and prev_features is not None:
|
||||
top_down_features = F.interpolate(
|
||||
prev_features.to(dtype=x.dtype),
|
||||
scale_factor=2.0,
|
||||
mode=self.fpn_interp_model,
|
||||
align_corners=(None if self.fpn_interp_model == "nearest" else False),
|
||||
antialias=False,
|
||||
)
|
||||
prev_features = lateral_features + top_down_features
|
||||
if self.fuse_type == "avg":
|
||||
prev_features /= 2
|
||||
else:
|
||||
prev_features = lateral_features
|
||||
x_out = prev_features
|
||||
out[i] = x_out
|
||||
pos[i] = self.position_encoding(x_out).to(x_out.dtype)
|
||||
|
||||
return out, pos
|
||||
|
||||
|
||||
class Hiera(nn.Module):
|
||||
"""
|
||||
Hierarchical vision transformer for efficient multiscale feature extraction in image processing tasks.
|
||||
|
||||
This class implements a Hiera model, which is a hierarchical vision transformer architecture designed for
|
||||
efficient multiscale feature extraction. It uses a series of transformer blocks organized into stages,
|
||||
with optional pooling and global attention mechanisms.
|
||||
|
||||
Attributes:
|
||||
window_spec (tuple[int, ...]): Window sizes for each stage.
|
||||
q_stride (tuple[int, int]): Downsampling stride between stages.
|
||||
stage_ends (list[int]): Indices of the last block in each stage.
|
||||
q_pool_blocks (list[int]): Indices of blocks where pooling is applied.
|
||||
return_interm_layers (bool): Whether to return intermediate layer outputs.
|
||||
patch_embed (PatchEmbed): Module for patch embedding.
|
||||
global_att_blocks (tuple[int, ...]): Indices of blocks with global attention.
|
||||
window_pos_embed_bkg_spatial_size (tuple[int, int]): Spatial size for window positional embedding background.
|
||||
pos_embed (nn.Parameter): Positional embedding for the background.
|
||||
pos_embed_window (nn.Parameter): Positional embedding for the window.
|
||||
blocks (nn.ModuleList): List of MultiScaleBlock modules.
|
||||
channel_list (list[int]): List of output channel dimensions for each stage.
|
||||
|
||||
Methods:
|
||||
_get_pos_embed: Generate positional embeddings by interpolating and combining window and background embeddings.
|
||||
forward: Perform the forward pass through the Hiera model.
|
||||
|
||||
Examples:
|
||||
>>> model = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
|
||||
>>> input_tensor = torch.randn(1, 3, 224, 224)
|
||||
>>> output_features = model(input_tensor)
|
||||
>>> for feat in output_features:
|
||||
... print(feat.shape)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
embed_dim: int = 96, # initial embed dim
|
||||
num_heads: int = 1, # initial number of heads
|
||||
drop_path_rate: float = 0.0, # stochastic depth
|
||||
q_pool: int = 3, # number of q_pool stages
|
||||
q_stride: tuple[int, int] = (2, 2), # downsample stride bet. stages
|
||||
stages: tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
|
||||
dim_mul: float = 2.0, # dim_mul factor at stage shift
|
||||
head_mul: float = 2.0, # head_mul factor at stage shift
|
||||
window_pos_embed_bkg_spatial_size: tuple[int, int] = (14, 14),
|
||||
# window size per stage, when not using global att.
|
||||
window_spec: tuple[int, ...] = (
|
||||
8,
|
||||
4,
|
||||
14,
|
||||
7,
|
||||
),
|
||||
# global attn in these blocks
|
||||
global_att_blocks: tuple[int, ...] = (
|
||||
12,
|
||||
16,
|
||||
20,
|
||||
),
|
||||
return_interm_layers=True, # return feats from every stage
|
||||
):
|
||||
"""
|
||||
Initialize a Hiera model, a hierarchical vision transformer for efficient multiscale feature extraction.
|
||||
|
||||
Hiera is a hierarchical vision transformer architecture designed for efficient multiscale feature extraction
|
||||
in image processing tasks. It uses a series of transformer blocks organized into stages, with optional
|
||||
pooling and global attention mechanisms.
|
||||
|
||||
Args:
|
||||
embed_dim (int): Initial embedding dimension for the model.
|
||||
num_heads (int): Initial number of attention heads.
|
||||
drop_path_rate (float): Stochastic depth rate.
|
||||
q_pool (int): Number of query pooling stages.
|
||||
q_stride (tuple[int, int]): Downsampling stride between stages.
|
||||
stages (tuple[int, ...]): Number of blocks per stage.
|
||||
dim_mul (float): Dimension multiplier factor at stage transitions.
|
||||
head_mul (float): Head multiplier factor at stage transitions.
|
||||
window_pos_embed_bkg_spatial_size (tuple[int, int]): Spatial size for window positional embedding background.
|
||||
window_spec (tuple[int, ...]): Window sizes for each stage when not using global attention.
|
||||
global_att_blocks (tuple[int, ...]): Indices of blocks that use global attention.
|
||||
return_interm_layers (bool): Whether to return intermediate layer outputs.
|
||||
|
||||
Examples:
|
||||
>>> model = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
|
||||
>>> input_tensor = torch.randn(1, 3, 224, 224)
|
||||
>>> output_features = model(input_tensor)
|
||||
>>> for feat in output_features:
|
||||
... print(feat.shape)
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
assert len(stages) == len(window_spec)
|
||||
self.window_spec = window_spec
|
||||
|
||||
depth = sum(stages)
|
||||
self.q_stride = q_stride
|
||||
self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
|
||||
assert 0 <= q_pool <= len(self.stage_ends[:-1])
|
||||
self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
|
||||
self.return_interm_layers = return_interm_layers
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
embed_dim=embed_dim,
|
||||
kernel_size=(7, 7),
|
||||
stride=(4, 4),
|
||||
padding=(3, 3),
|
||||
)
|
||||
# Which blocks have global attention?
|
||||
self.global_att_blocks = global_att_blocks
|
||||
|
||||
# Windowed positional embedding (https://arxiv.org/abs/2311.05613)
|
||||
self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
|
||||
self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size))
|
||||
self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
|
||||
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
||||
|
||||
cur_stage = 1
|
||||
self.blocks = nn.ModuleList()
|
||||
|
||||
for i in range(depth):
|
||||
dim_out = embed_dim
|
||||
# Window size lags by one block, so the first block of the next stage uses the initial window size
|
||||
# of the previous stage and the final window size of the current stage
|
||||
window_size = self.window_spec[cur_stage - 1]
|
||||
|
||||
if self.global_att_blocks is not None:
|
||||
window_size = 0 if i in self.global_att_blocks else window_size
|
||||
|
||||
if i - 1 in self.stage_ends:
|
||||
dim_out = int(embed_dim * dim_mul)
|
||||
num_heads = int(num_heads * head_mul)
|
||||
cur_stage += 1
|
||||
|
||||
block = MultiScaleBlock(
|
||||
dim=embed_dim,
|
||||
dim_out=dim_out,
|
||||
num_heads=num_heads,
|
||||
drop_path=dpr[i],
|
||||
q_stride=self.q_stride if i in self.q_pool_blocks else None,
|
||||
window_size=window_size,
|
||||
)
|
||||
|
||||
embed_dim = dim_out
|
||||
self.blocks.append(block)
|
||||
|
||||
self.channel_list = (
|
||||
[self.blocks[i].dim_out for i in self.stage_ends[::-1]]
|
||||
if return_interm_layers
|
||||
else [self.blocks[-1].dim_out]
|
||||
)
|
||||
|
||||
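# Editor's note: a worked example for the defaults above (stages=(2, 3, 16, 3), q_pool=3, embed_dim=96),
# offered as an aid rather than part of the original file:
#   depth = 24, stage_ends = [1, 4, 20, 23], q_pool_blocks = [2, 5, 21]
#   i.e. q_stride pooling is applied in the first block of stages 2-4, and channel_list
#   (ordered coarse-to-fine when return_interm_layers=True) is [768, 384, 192, 96].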
def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor:
|
||||
"""Generate positional embeddings by interpolating and combining window and background embeddings."""
|
||||
h, w = hw
|
||||
window_embed = self.pos_embed_window
|
||||
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
|
||||
pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)])
|
||||
pos_embed = pos_embed.permute(0, 2, 3, 1)
|
||||
return pos_embed
|
||||
|
||||
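# Editor's note: a sketch of the tiling above, assuming a 1024x1024 input (patch stride 4, so h = w = 256)
# and window_spec[0] = 8: the 14x14 background embedding is bicubic-resized to 256x256, the 8x8 window
# embedding is tiled 32x32 times (256 // 8) and added on top, and the sum is permuted to
# (1, 256, 256, embed_dim) to match the channels-last layout used by the blocks.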
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
|
||||
"""
|
||||
Perform forward pass through Hiera model, extracting multiscale features from input images.
|
||||
|
||||
Args:
|
||||
x (torch.Tensor): Input tensor with shape (B, C, H, W) representing a batch of images.
|
||||
|
||||
Returns:
|
||||
(list[torch.Tensor]): List of feature maps at different scales, each with shape (B, C_i, H_i, W_i), where
|
||||
C_i is the channel dimension and H_i, W_i are the spatial dimensions at scale i. The list is ordered
|
||||
from highest resolution (fine features) to lowest resolution (coarse features) if return_interm_layers
|
||||
is True, otherwise contains only the final output.
|
||||
|
||||
Examples:
|
||||
>>> model = Hiera(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
|
||||
>>> input_tensor = torch.randn(1, 3, 224, 224)
|
||||
>>> output_features = model(input_tensor)
|
||||
>>> for feat in output_features:
|
||||
... print(feat.shape)
|
||||
"""
|
||||
x = self.patch_embed(x)
|
||||
# x: (B, H, W, C)
|
||||
|
||||
# Add positional embedding
|
||||
x = x + self._get_pos_embed(x.shape[1:3])
|
||||
|
||||
outputs = []
|
||||
for i, blk in enumerate(self.blocks):
|
||||
x = blk(x)
|
||||
if (i == self.stage_ends[-1]) or (i in self.stage_ends and self.return_interm_layers):
|
||||
feats = x.permute(0, 3, 1, 2)
|
||||
outputs.append(feats)
|
||||
|
||||
return outputs
|
||||
312
ultralytics/models/sam/modules/memory_attention.py
Normal file
312
ultralytics/models/sam/modules/memory_attention.py
Normal file
@@ -0,0 +1,312 @@
|
||||
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
|
||||
import torch
|
||||
from torch import nn
|
||||
|
||||
from .blocks import RoPEAttention
|
||||
|
||||
|
||||
class MemoryAttentionLayer(nn.Module):
|
||||
"""
|
||||
Implements a memory attention layer with self-attention and cross-attention mechanisms for neural networks.
|
||||
|
||||
This class combines self-attention, cross-attention, and feedforward components to process input tensors and
|
||||
generate memory-based attention outputs.
|
||||
|
||||
Attributes:
|
||||
d_model (int): Dimensionality of the model.
|
||||
dim_feedforward (int): Dimensionality of the feedforward network.
|
||||
dropout_value (float): Dropout rate for regularization.
|
||||
self_attn (RoPEAttention): Self-attention mechanism using RoPE (Rotary Position Embedding).
|
||||
cross_attn_image (RoPEAttention): Cross-attention mechanism for image processing.
|
||||
linear1 (nn.Linear): First linear layer of the feedforward network.
|
||||
linear2 (nn.Linear): Second linear layer of the feedforward network.
|
||||
norm1 (nn.LayerNorm): Layer normalization for self-attention output.
|
||||
norm2 (nn.LayerNorm): Layer normalization for cross-attention output.
|
||||
norm3 (nn.LayerNorm): Layer normalization for feedforward network output.
|
||||
dropout1 (nn.Dropout): Dropout layer after self-attention.
|
||||
dropout2 (nn.Dropout): Dropout layer after cross-attention.
|
||||
dropout3 (nn.Dropout): Dropout layer after feedforward network.
|
||||
activation (nn.ReLU): Activation function for the feedforward network.
|
||||
pos_enc_at_attn (bool): Flag to add positional encoding at attention.
|
||||
pos_enc_at_cross_attn_queries (bool): Flag to add positional encoding to cross-attention queries.
|
||||
pos_enc_at_cross_attn_keys (bool): Flag to add positional encoding to cross-attention keys.
|
||||
|
||||
Methods:
|
||||
forward: Performs the full memory attention operation on input tensors.
|
||||
_forward_sa: Performs self-attention on input tensor.
|
||||
_forward_ca: Performs cross-attention between target and memory tensors.
|
||||
|
||||
Examples:
|
||||
>>> layer = MemoryAttentionLayer(d_model=256, dim_feedforward=2048, dropout=0.1)
|
||||
>>> tgt = torch.randn(1, 100, 256)
|
||||
>>> memory = torch.randn(1, 100, 64)
|
||||
>>> pos = torch.randn(1, 100, 256)
|
||||
>>> query_pos = torch.randn(1, 100, 256)
|
||||
>>> output = layer(tgt, memory, pos, query_pos)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 100, 256])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
d_model: int = 256,
|
||||
dim_feedforward: int = 2048,
|
||||
dropout: float = 0.1,
|
||||
pos_enc_at_attn: bool = False,
|
||||
pos_enc_at_cross_attn_keys: bool = True,
|
||||
pos_enc_at_cross_attn_queries: bool = False,
|
||||
):
|
||||
"""
|
||||
Initialize a memory attention layer with self-attention, cross-attention, and feedforward components.
|
||||
|
||||
Args:
|
||||
d_model (int): Dimensionality of the model.
|
||||
dim_feedforward (int): Dimensionality of the feedforward network.
|
||||
dropout (float): Dropout rate for regularization.
|
||||
pos_enc_at_attn (bool): Whether to add positional encoding at attention.
|
||||
pos_enc_at_cross_attn_keys (bool): Whether to add positional encoding to cross-attention keys.
|
||||
pos_enc_at_cross_attn_queries (bool): Whether to add positional encoding to cross-attention queries.
|
||||
"""
|
||||
super().__init__()
|
||||
self.d_model = d_model
|
||||
self.dim_feedforward = dim_feedforward
|
||||
self.dropout_value = dropout
|
||||
self.self_attn = RoPEAttention(embedding_dim=256, num_heads=1, downsample_rate=1)
|
||||
self.cross_attn_image = RoPEAttention(
|
||||
rope_k_repeat=True,
|
||||
embedding_dim=256,
|
||||
num_heads=1,
|
||||
downsample_rate=1,
|
||||
kv_in_dim=64,
|
||||
)
|
||||
|
||||
# Implementation of Feedforward model
|
||||
self.linear1 = nn.Linear(d_model, dim_feedforward)
|
||||
self.dropout = nn.Dropout(dropout)
|
||||
self.linear2 = nn.Linear(dim_feedforward, d_model)
|
||||
|
||||
self.norm1 = nn.LayerNorm(d_model)
|
||||
self.norm2 = nn.LayerNorm(d_model)
|
||||
self.norm3 = nn.LayerNorm(d_model)
|
||||
self.dropout1 = nn.Dropout(dropout)
|
||||
self.dropout2 = nn.Dropout(dropout)
|
||||
self.dropout3 = nn.Dropout(dropout)
|
||||
|
||||
self.activation = nn.ReLU()
|
||||
|
||||
# Where to add pos enc
|
||||
self.pos_enc_at_attn = pos_enc_at_attn
|
||||
self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
|
||||
self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
|
||||
|
||||
def _forward_sa(self, tgt: torch.Tensor, query_pos: torch.Tensor | None) -> torch.Tensor:
|
||||
"""Perform self-attention on input tensor using positional encoding and RoPE attention mechanism."""
|
||||
tgt2 = self.norm1(tgt)
|
||||
q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
|
||||
tgt2 = self.self_attn(q, k, v=tgt2)
|
||||
tgt = tgt + self.dropout1(tgt2)
|
||||
return tgt
|
||||
|
||||
def _forward_ca(
|
||||
self,
|
||||
tgt: torch.Tensor,
|
||||
memory: torch.Tensor,
|
||||
query_pos: torch.Tensor | None,
|
||||
pos: torch.Tensor | None,
|
||||
num_k_exclude_rope: int = 0,
|
||||
) -> torch.Tensor:
|
||||
"""Perform cross-attention between target and memory tensors using RoPEAttention mechanism."""
|
||||
kwds = {}
|
||||
if num_k_exclude_rope > 0:
|
||||
assert isinstance(self.cross_attn_image, RoPEAttention)
|
||||
kwds = {"num_k_exclude_rope": num_k_exclude_rope}
|
||||
|
||||
# Cross-Attention
|
||||
tgt2 = self.norm2(tgt)
|
||||
tgt2 = self.cross_attn_image(
|
||||
q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
|
||||
k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
|
||||
v=memory,
|
||||
**kwds,
|
||||
)
|
||||
tgt = tgt + self.dropout2(tgt2)
|
||||
return tgt
|
||||
|
||||
def forward(
|
||||
self,
|
||||
tgt: torch.Tensor,
|
||||
memory: torch.Tensor,
|
||||
pos: torch.Tensor | None = None,
|
||||
query_pos: torch.Tensor | None = None,
|
||||
num_k_exclude_rope: int = 0,
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Process input tensors through self-attention, cross-attention, and feedforward network layers.
|
||||
|
||||
Args:
|
||||
tgt (torch.Tensor): Target tensor for self-attention with shape (N, L, D).
|
||||
memory (torch.Tensor): Memory tensor for cross-attention with shape (N, S, D).
|
||||
pos (Optional[torch.Tensor]): Positional encoding for memory tensor.
|
||||
query_pos (Optional[torch.Tensor]): Positional encoding for target tensor.
|
||||
num_k_exclude_rope (int): Number of keys to exclude from rotary position embedding.
|
||||
|
||||
Returns:
|
||||
(torch.Tensor): Processed tensor after attention and feedforward layers with shape (N, L, D).
|
||||
"""
|
||||
tgt = self._forward_sa(tgt, query_pos)
|
||||
tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
|
||||
# MLP
|
||||
tgt2 = self.norm3(tgt)
|
||||
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
|
||||
tgt = tgt + self.dropout3(tgt2)
|
||||
return tgt
|
||||
|
||||
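# Editor's note: a condensed view of the pre-norm residual flow implemented by forward above
# (a restatement, not additional API):
#   tgt = tgt + dropout1(self_attn(norm1(tgt)))                              # self-attention
#   tgt = tgt + dropout2(cross_attn_image(norm2(tgt), memory))               # cross-attention into memory
#   tgt = tgt + dropout3(linear2(dropout(activation(linear1(norm3(tgt))))))  # position-wise MLP
# Positional encodings are added to queries/keys only where the pos_enc_at_* flags are True.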
|
||||
class MemoryAttention(nn.Module):
|
||||
"""
|
||||
Memory attention module for processing sequential data with self and cross-attention mechanisms.
|
||||
|
||||
This class implements a multi-layer attention mechanism that combines self-attention and cross-attention
|
||||
for processing sequential data, particularly useful in transformer-like architectures.
|
||||
|
||||
Attributes:
|
||||
d_model (int): The dimension of the model's hidden state.
|
||||
layers (nn.ModuleList): A list of MemoryAttentionLayer modules.
|
||||
num_layers (int): The number of attention layers.
|
||||
norm (nn.LayerNorm): Layer normalization applied to the output.
|
||||
pos_enc_at_input (bool): Whether to apply positional encoding at the input.
|
||||
batch_first (bool): Whether the input tensors are in batch-first format.
|
||||
|
||||
Methods:
|
||||
forward: Processes input tensors through the attention layers.
|
||||
|
||||
Examples:
|
||||
>>> d_model = 256
|
||||
>>> layer = MemoryAttentionLayer(d_model)
|
||||
>>> attention = MemoryAttention(d_model, pos_enc_at_input=True, layer=layer, num_layers=3)
|
||||
>>> curr = torch.randn(10, 32, d_model) # (seq_len, batch_size, d_model)
|
||||
>>> memory = torch.randn(20, 32, d_model) # (mem_len, batch_size, d_model)
|
||||
>>> curr_pos = torch.randn(10, 32, d_model)
|
||||
>>> memory_pos = torch.randn(20, 32, d_model)
|
||||
>>> output = attention(curr, memory, curr_pos, memory_pos)
|
||||
>>> print(output.shape)
|
||||
torch.Size([10, 32, 256])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
d_model: int,
|
||||
pos_enc_at_input: bool,
|
||||
layer: nn.Module,
|
||||
num_layers: int,
|
||||
batch_first: bool = True, # Do layers expect batch first input?
|
||||
):
|
||||
"""
|
||||
Initialize MemoryAttention with specified layers and normalization for sequential data processing.
|
||||
|
||||
This class implements a multi-layer attention mechanism that combines self-attention and cross-attention
|
||||
for processing sequential data, particularly useful in transformer-like architectures.
|
||||
|
||||
Args:
|
||||
d_model (int): The dimension of the model's hidden state.
|
||||
pos_enc_at_input (bool): Whether to apply positional encoding at the input.
|
||||
layer (nn.Module): The attention layer to be used in the module.
|
||||
num_layers (int): The number of attention layers.
|
||||
batch_first (bool): Whether the input tensors are in batch-first format.
|
||||
|
||||
Examples:
|
||||
>>> d_model = 256
|
||||
>>> layer = MemoryAttentionLayer(d_model)
|
||||
>>> attention = MemoryAttention(d_model, pos_enc_at_input=True, layer=layer, num_layers=3)
|
||||
>>> curr = torch.randn(10, 32, d_model) # (seq_len, batch_size, d_model)
|
||||
>>> memory = torch.randn(20, 32, d_model) # (mem_len, batch_size, d_model)
|
||||
>>> curr_pos = torch.randn(10, 32, d_model)
|
||||
>>> memory_pos = torch.randn(20, 32, d_model)
|
||||
>>> output = attention(curr, memory, curr_pos, memory_pos)
|
||||
>>> print(output.shape)
|
||||
torch.Size([10, 32, 256])
|
||||
"""
|
||||
super().__init__()
|
||||
self.d_model = d_model
|
||||
self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])
|
||||
self.num_layers = num_layers
|
||||
self.norm = nn.LayerNorm(d_model)
|
||||
self.pos_enc_at_input = pos_enc_at_input
|
||||
self.batch_first = batch_first
|
||||
|
||||
def forward(
|
||||
self,
|
||||
curr: torch.Tensor, # self-attention inputs
|
||||
memory: torch.Tensor, # cross-attention inputs
|
||||
curr_pos: torch.Tensor | None = None, # pos_enc for self-attention inputs
|
||||
memory_pos: torch.Tensor | None = None, # pos_enc for cross-attention inputs
|
||||
num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
|
||||
) -> torch.Tensor:
|
||||
"""
|
||||
Process inputs through attention layers, applying self and cross-attention with positional encoding.
|
||||
|
||||
Args:
|
||||
curr (torch.Tensor): Self-attention input tensor, representing the current state.
|
||||
memory (torch.Tensor): Cross-attention input tensor, representing memory information.
|
||||
curr_pos (Optional[torch.Tensor]): Positional encoding for self-attention inputs.
|
||||
memory_pos (Optional[torch.Tensor]): Positional encoding for cross-attention inputs.
|
||||
num_obj_ptr_tokens (int): Number of object pointer tokens to exclude from rotary position embedding.
|
||||
|
||||
Returns:
|
||||
(torch.Tensor): Processed output tensor after applying attention layers and normalization.
|
||||
|
||||
Examples:
|
||||
>>> d_model = 256
|
||||
>>> layer = MemoryAttentionLayer(d_model)
|
||||
>>> attention = MemoryAttention(d_model, pos_enc_at_input=True, layer=layer, num_layers=3)
|
||||
>>> curr = torch.randn(10, 32, d_model) # (seq_len, batch_size, d_model)
|
||||
>>> memory = torch.randn(20, 32, d_model) # (mem_len, batch_size, d_model)
|
||||
>>> curr_pos = torch.randn(10, 32, d_model)
|
||||
>>> memory_pos = torch.randn(20, 32, d_model)
|
||||
>>> output = attention(curr, memory, curr_pos, memory_pos)
|
||||
>>> print(output.shape)
|
||||
torch.Size([10, 32, 256])
|
||||
"""
|
||||
if isinstance(curr, list):
|
||||
assert isinstance(curr_pos, list)
|
||||
assert len(curr) == len(curr_pos) == 1
|
||||
curr, curr_pos = curr[0], curr_pos[0]
|
||||
|
||||
assert curr.shape[1] == memory.shape[1], "Batch size must be the same for curr and memory"
|
||||
|
||||
output = curr
|
||||
if self.pos_enc_at_input and curr_pos is not None:
|
||||
output = output + 0.1 * curr_pos
|
||||
|
||||
if self.batch_first:
|
||||
# Convert to batch first
|
||||
output = output.transpose(0, 1)
|
||||
curr_pos = curr_pos.transpose(0, 1)
|
||||
memory = memory.transpose(0, 1)
|
||||
memory_pos = memory_pos.transpose(0, 1)
|
||||
|
||||
for layer in self.layers:
|
||||
kwds = {}
|
||||
if isinstance(layer.cross_attn_image, RoPEAttention):
|
||||
kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
|
||||
|
||||
output = layer(
|
||||
tgt=output,
|
||||
memory=memory,
|
||||
pos=memory_pos,
|
||||
query_pos=curr_pos,
|
||||
**kwds,
|
||||
)
|
||||
normed_output = self.norm(output)
|
||||
|
||||
if self.batch_first:
|
||||
# Convert back to seq first
|
||||
normed_output = normed_output.transpose(0, 1)
|
||||
curr_pos = curr_pos.transpose(0, 1)
|
||||
|
||||
return normed_output
|
||||
1033
ultralytics/models/sam/modules/sam.py
Normal file
1033
ultralytics/models/sam/modules/sam.py
Normal file
File diff suppressed because it is too large
998
ultralytics/models/sam/modules/tiny_encoder.py
Normal file
998
ultralytics/models/sam/modules/tiny_encoder.py
Normal file
@@ -0,0 +1,998 @@
|
||||
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
|
||||
|
||||
# --------------------------------------------------------
|
||||
# TinyViT Model Architecture
|
||||
# Copyright (c) 2022 Microsoft
|
||||
# Adapted from LeViT and Swin Transformer
|
||||
# LeViT: (https://github.com/facebookresearch/levit)
|
||||
# Swin: (https://github.com/microsoft/swin-transformer)
|
||||
# Build the TinyViT Model
|
||||
# --------------------------------------------------------
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
from ultralytics.nn.modules import LayerNorm2d
|
||||
from ultralytics.utils.instance import to_2tuple
|
||||
|
||||
|
||||
class Conv2d_BN(torch.nn.Sequential):
|
||||
"""
|
||||
A sequential container that performs 2D convolution followed by batch normalization.
|
||||
|
||||
This module combines a 2D convolution layer with batch normalization, providing a common building block
|
||||
for convolutional neural networks. The batch normalization weights and biases are initialized to specific
|
||||
values for optimal training performance.
|
||||
|
||||
Attributes:
|
||||
c (torch.nn.Conv2d): 2D convolution layer.
|
||||
bn (torch.nn.BatchNorm2d): Batch normalization layer.
|
||||
|
||||
Examples:
|
||||
>>> conv_bn = Conv2d_BN(3, 64, ks=3, stride=1, pad=1)
|
||||
>>> input_tensor = torch.randn(1, 3, 224, 224)
|
||||
>>> output = conv_bn(input_tensor)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 64, 224, 224])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
a: int,
|
||||
b: int,
|
||||
ks: int = 1,
|
||||
stride: int = 1,
|
||||
pad: int = 0,
|
||||
dilation: int = 1,
|
||||
groups: int = 1,
|
||||
bn_weight_init: float = 1,
|
||||
):
|
||||
"""
|
||||
Initialize a sequential container with 2D convolution followed by batch normalization.
|
||||
|
||||
Args:
|
||||
a (int): Number of input channels.
|
||||
b (int): Number of output channels.
|
||||
ks (int, optional): Kernel size for the convolution.
|
||||
stride (int, optional): Stride for the convolution.
|
||||
pad (int, optional): Padding for the convolution.
|
||||
dilation (int, optional): Dilation factor for the convolution.
|
||||
groups (int, optional): Number of groups for the convolution.
|
||||
bn_weight_init (float, optional): Initial value for batch normalization weight.
|
||||
"""
|
||||
super().__init__()
|
||||
self.add_module("c", torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False))
|
||||
bn = torch.nn.BatchNorm2d(b)
|
||||
torch.nn.init.constant_(bn.weight, bn_weight_init)
|
||||
torch.nn.init.constant_(bn.bias, 0)
|
||||
self.add_module("bn", bn)
|
||||
|
||||
|
||||
class PatchEmbed(nn.Module):
|
||||
"""
|
||||
Embed images into patches and project them into a specified embedding dimension.
|
||||
|
||||
This module converts input images into patch embeddings using a sequence of convolutional layers,
|
||||
effectively downsampling the spatial dimensions while increasing the channel dimension.
|
||||
|
||||
Attributes:
|
||||
patches_resolution (tuple[int, int]): Resolution of the patches after embedding.
|
||||
num_patches (int): Total number of patches.
|
||||
in_chans (int): Number of input channels.
|
||||
embed_dim (int): Dimension of the embedding.
|
||||
seq (nn.Sequential): Sequence of convolutional and activation layers for patch embedding.
|
||||
|
||||
Examples:
|
||||
>>> import torch
|
||||
>>> patch_embed = PatchEmbed(in_chans=3, embed_dim=96, resolution=224, activation=nn.GELU)
|
||||
>>> x = torch.randn(1, 3, 224, 224)
|
||||
>>> output = patch_embed(x)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 96, 56, 56])
|
||||
"""
|
||||
|
||||
def __init__(self, in_chans: int, embed_dim: int, resolution: int, activation):
|
||||
"""
|
||||
Initialize patch embedding with convolutional layers for image-to-patch conversion and projection.
|
||||
|
||||
Args:
|
||||
in_chans (int): Number of input channels.
|
||||
embed_dim (int): Dimension of the embedding.
|
||||
resolution (int): Input image resolution.
|
||||
activation (nn.Module): Activation function to use between convolutions.
|
||||
"""
|
||||
super().__init__()
|
||||
img_size: tuple[int, int] = to_2tuple(resolution)
|
||||
self.patches_resolution = (img_size[0] // 4, img_size[1] // 4)
|
||||
self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
|
||||
self.in_chans = in_chans
|
||||
self.embed_dim = embed_dim
|
||||
n = embed_dim
|
||||
self.seq = nn.Sequential(
|
||||
Conv2d_BN(in_chans, n // 2, 3, 2, 1),
|
||||
activation(),
|
||||
Conv2d_BN(n // 2, n, 3, 2, 1),
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Process input tensor through patch embedding sequence, converting images to patch embeddings."""
|
||||
return self.seq(x)
|
||||
|
||||
|
||||
class MBConv(nn.Module):
|
||||
"""
|
||||
Mobile Inverted Bottleneck Conv (MBConv) layer, part of the EfficientNet architecture.
|
||||
|
||||
This module implements the mobile inverted bottleneck convolution with expansion, depthwise convolution,
|
||||
and projection phases, along with residual connections for improved gradient flow.
|
||||
|
||||
Attributes:
|
||||
in_chans (int): Number of input channels.
|
||||
hidden_chans (int): Number of hidden channels after expansion.
|
||||
out_chans (int): Number of output channels.
|
||||
conv1 (Conv2d_BN): First convolutional layer for channel expansion.
|
||||
act1 (nn.Module): First activation function.
|
||||
conv2 (Conv2d_BN): Depthwise convolutional layer.
|
||||
act2 (nn.Module): Second activation function.
|
||||
conv3 (Conv2d_BN): Final convolutional layer for projection.
|
||||
act3 (nn.Module): Third activation function.
|
||||
drop_path (nn.Module): Drop path layer (Identity for inference).
|
||||
|
||||
Examples:
|
||||
>>> in_chans, out_chans = 32, 64
|
||||
>>> mbconv = MBConv(in_chans, out_chans, expand_ratio=4, activation=nn.ReLU, drop_path=0.1)
|
||||
>>> x = torch.randn(1, in_chans, 56, 56)
|
||||
>>> output = mbconv(x)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 64, 56, 56])
|
||||
"""
|
||||
|
||||
def __init__(self, in_chans: int, out_chans: int, expand_ratio: float, activation, drop_path: float):
|
||||
"""
|
||||
Initialize the MBConv layer with specified input/output channels, expansion ratio, and activation.
|
||||
|
||||
Args:
|
||||
in_chans (int): Number of input channels.
|
||||
out_chans (int): Number of output channels.
|
||||
expand_ratio (float): Channel expansion ratio for the hidden layer.
|
||||
activation (nn.Module): Activation function to use.
|
||||
drop_path (float): Drop path rate for stochastic depth.
|
||||
"""
|
||||
super().__init__()
|
||||
self.in_chans = in_chans
|
||||
self.hidden_chans = int(in_chans * expand_ratio)
|
||||
self.out_chans = out_chans
|
||||
|
||||
self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1)
|
||||
self.act1 = activation()
|
||||
|
||||
self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans, ks=3, stride=1, pad=1, groups=self.hidden_chans)
|
||||
self.act2 = activation()
|
||||
|
||||
self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0)
|
||||
self.act3 = activation()
|
||||
|
||||
# NOTE: `DropPath` is needed only for training.
|
||||
# self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.drop_path = nn.Identity()
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Implement the forward pass of MBConv, applying convolutions and skip connection."""
|
||||
shortcut = x
|
||||
x = self.conv1(x)
|
||||
x = self.act1(x)
|
||||
x = self.conv2(x)
|
||||
x = self.act2(x)
|
||||
x = self.conv3(x)
|
||||
x = self.drop_path(x)
|
||||
x += shortcut
|
||||
return self.act3(x)
|
||||
|
||||
|
||||
class PatchMerging(nn.Module):
|
||||
"""
|
||||
Merge neighboring patches in the feature map and project to a new dimension.
|
||||
|
||||
This class implements a patch merging operation that combines spatial information and adjusts the feature
|
||||
dimension using a series of convolutional layers with batch normalization. It effectively reduces spatial
|
||||
resolution while potentially increasing channel dimensions.
|
||||
|
||||
Attributes:
|
||||
input_resolution (tuple[int, int]): The input resolution (height, width) of the feature map.
|
||||
dim (int): The input dimension of the feature map.
|
||||
out_dim (int): The output dimension after merging and projection.
|
||||
act (nn.Module): The activation function used between convolutions.
|
||||
conv1 (Conv2d_BN): The first convolutional layer for dimension projection.
|
||||
conv2 (Conv2d_BN): The second convolutional layer for spatial merging.
|
||||
conv3 (Conv2d_BN): The third convolutional layer for final projection.
|
||||
|
||||
Examples:
|
||||
>>> input_resolution = (56, 56)
|
||||
>>> patch_merging = PatchMerging(input_resolution, dim=64, out_dim=128, activation=nn.ReLU)
|
||||
>>> x = torch.randn(4, 64, 56, 56)
|
||||
>>> output = patch_merging(x)
|
||||
>>> print(output.shape)
|
||||
torch.Size([4, 3136, 128])
|
||||
"""
|
||||
|
||||
def __init__(self, input_resolution: tuple[int, int], dim: int, out_dim: int, activation):
|
||||
"""
|
||||
Initialize the PatchMerging module for merging and projecting neighboring patches in feature maps.
|
||||
|
||||
Args:
|
||||
input_resolution (tuple[int, int]): The input resolution (height, width) of the feature map.
|
||||
dim (int): The input dimension of the feature map.
|
||||
out_dim (int): The output dimension after merging and projection.
|
||||
activation (nn.Module): The activation function used between convolutions.
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
self.input_resolution = input_resolution
|
||||
self.dim = dim
|
||||
self.out_dim = out_dim
|
||||
self.act = activation()
|
||||
self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0)
|
||||
stride_c = 1 if out_dim in {320, 448, 576} else 2
|
||||
self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim)
|
||||
self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Apply patch merging and dimension projection to the input feature map."""
|
||||
if x.ndim == 3:
|
||||
H, W = self.input_resolution
|
||||
B = len(x)
|
||||
# (B, C, H, W)
|
||||
x = x.view(B, H, W, -1).permute(0, 3, 1, 2)
|
||||
|
||||
x = self.conv1(x)
|
||||
x = self.act(x)
|
||||
|
||||
x = self.conv2(x)
|
||||
x = self.act(x)
|
||||
x = self.conv3(x)
|
||||
return x.flatten(2).transpose(1, 2)
|
||||
|
||||
|
||||
class ConvLayer(nn.Module):
|
||||
"""
|
||||
Convolutional Layer featuring multiple MobileNetV3-style inverted bottleneck convolutions (MBConv).
|
||||
|
||||
This layer optionally applies downsample operations to the output and supports gradient checkpointing
|
||||
for memory efficiency during training.
|
||||
|
||||
Attributes:
|
||||
dim (int): Dimensionality of the input and output.
|
||||
input_resolution (tuple[int, int]): Resolution of the input image.
|
||||
depth (int): Number of MBConv layers in the block.
|
||||
use_checkpoint (bool): Whether to use gradient checkpointing to save memory.
|
||||
blocks (nn.ModuleList): List of MBConv layers.
|
||||
downsample (Optional[nn.Module]): Function for downsampling the output.
|
||||
|
||||
Examples:
|
||||
>>> input_tensor = torch.randn(1, 64, 56, 56)
|
||||
>>> conv_layer = ConvLayer(64, (56, 56), depth=3, activation=nn.ReLU)
|
||||
>>> output = conv_layer(input_tensor)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 3136, 128])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
input_resolution: tuple[int, int],
|
||||
depth: int,
|
||||
activation,
|
||||
drop_path: float | list[float] = 0.0,
|
||||
downsample: nn.Module | None = None,
|
||||
use_checkpoint: bool = False,
|
||||
out_dim: int | None = None,
|
||||
conv_expand_ratio: float = 4.0,
|
||||
):
|
||||
"""
|
||||
Initialize the ConvLayer with the given dimensions and settings.
|
||||
|
||||
This layer consists of multiple MobileNetV3-style inverted bottleneck convolutions (MBConv) and
|
||||
optionally applies downsampling to the output.
|
||||
|
||||
Args:
|
||||
dim (int): The dimensionality of the input and output.
|
||||
input_resolution (tuple[int, int]): The resolution of the input image.
|
||||
depth (int): The number of MBConv layers in the block.
|
||||
activation (nn.Module): Activation function applied after each convolution.
|
||||
drop_path (float | list[float], optional): Drop path rate. Single float or a list of floats for each MBConv.
|
||||
downsample (Optional[nn.Module], optional): Function for downsampling the output. None to skip downsampling.
|
||||
use_checkpoint (bool, optional): Whether to use gradient checkpointing to save memory.
|
||||
out_dim (Optional[int], optional): The dimensionality of the output. None means it will be the same as `dim`.
|
||||
conv_expand_ratio (float, optional): Expansion ratio for the MBConv layers.
|
||||
"""
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
self.depth = depth
|
||||
self.use_checkpoint = use_checkpoint
|
||||
|
||||
# Build blocks
|
||||
self.blocks = nn.ModuleList(
|
||||
[
|
||||
MBConv(
|
||||
dim,
|
||||
dim,
|
||||
conv_expand_ratio,
|
||||
activation,
|
||||
drop_path[i] if isinstance(drop_path, list) else drop_path,
|
||||
)
|
||||
for i in range(depth)
|
||||
]
|
||||
)
|
||||
|
||||
# Patch merging layer
|
||||
self.downsample = (
|
||||
None
|
||||
if downsample is None
|
||||
else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Process input through convolutional layers, applying MBConv blocks and optional downsampling."""
|
||||
for blk in self.blocks:
|
||||
x = torch.utils.checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)  # checkpointing trades memory for recompute
|
||||
return x if self.downsample is None else self.downsample(x)
|
||||
|
||||
|
||||
class MLP(nn.Module):
|
||||
"""
|
||||
Multi-layer Perceptron (MLP) module for transformer architectures.
|
||||
|
||||
This module applies layer normalization, two fully-connected layers with an activation function in between,
|
||||
and dropout. It is commonly used in transformer-based architectures for processing token embeddings.
|
||||
|
||||
Attributes:
|
||||
norm (nn.LayerNorm): Layer normalization applied to the input.
|
||||
fc1 (nn.Linear): First fully-connected layer.
|
||||
fc2 (nn.Linear): Second fully-connected layer.
|
||||
act (nn.Module): Activation function applied after the first fully-connected layer.
|
||||
drop (nn.Dropout): Dropout layer applied after the activation function.
|
||||
|
||||
Examples:
|
||||
>>> import torch
|
||||
>>> from torch import nn
|
||||
>>> mlp = MLP(in_features=256, hidden_features=512, out_features=256, activation=nn.GELU, drop=0.1)
|
||||
>>> x = torch.randn(32, 100, 256)
|
||||
>>> output = mlp(x)
|
||||
>>> print(output.shape)
|
||||
torch.Size([32, 100, 256])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
in_features: int,
|
||||
hidden_features: int | None = None,
|
||||
out_features: int | None = None,
|
||||
activation=nn.GELU,
|
||||
drop: float = 0.0,
|
||||
):
|
||||
"""
|
||||
Initialize a multi-layer perceptron with configurable input, hidden, and output dimensions.
|
||||
|
||||
Args:
|
||||
in_features (int): Number of input features.
|
||||
hidden_features (Optional[int], optional): Number of hidden features.
|
||||
out_features (Optional[int], optional): Number of output features.
|
||||
activation (nn.Module): Activation function applied after the first fully-connected layer.
|
||||
drop (float, optional): Dropout probability.
|
||||
"""
|
||||
super().__init__()
|
||||
out_features = out_features or in_features
|
||||
hidden_features = hidden_features or in_features
|
||||
self.norm = nn.LayerNorm(in_features)
|
||||
self.fc1 = nn.Linear(in_features, hidden_features)
|
||||
self.fc2 = nn.Linear(hidden_features, out_features)
|
||||
self.act = activation()
|
||||
self.drop = nn.Dropout(drop)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Apply MLP operations: layer norm, FC layers, activation, and dropout to the input tensor."""
|
||||
x = self.norm(x)
|
||||
x = self.fc1(x)
|
||||
x = self.act(x)
|
||||
x = self.drop(x)
|
||||
x = self.fc2(x)
|
||||
return self.drop(x)
|
||||
|
||||
|
||||
class Attention(torch.nn.Module):
|
||||
"""
|
||||
Multi-head attention module with spatial awareness and trainable attention biases.
|
||||
|
||||
This module implements a multi-head attention mechanism with support for spatial awareness, applying
|
||||
attention biases based on spatial resolution. It includes trainable attention biases for each unique
|
||||
offset between spatial positions in the resolution grid.
|
||||
|
||||
Attributes:
|
||||
num_heads (int): Number of attention heads.
|
||||
scale (float): Scaling factor for attention scores.
|
||||
key_dim (int): Dimensionality of the keys and queries.
|
||||
nh_kd (int): Product of num_heads and key_dim.
|
||||
d (int): Dimensionality of the value vectors.
|
||||
dh (int): Product of d and num_heads.
|
||||
attn_ratio (float): Attention ratio affecting the dimensions of the value vectors.
|
||||
norm (nn.LayerNorm): Layer normalization applied to input.
|
||||
qkv (nn.Linear): Linear layer for computing query, key, and value projections.
|
||||
proj (nn.Linear): Linear layer for final projection.
|
||||
attention_biases (nn.Parameter): Learnable attention biases.
|
||||
attention_bias_idxs (torch.Tensor): Indices for attention biases.
|
||||
ab (torch.Tensor): Cached attention biases for inference, deleted during training.
|
||||
|
||||
Examples:
|
||||
>>> attn = Attention(dim=256, key_dim=64, num_heads=8, resolution=(14, 14))
|
||||
>>> x = torch.randn(1, 196, 256)
|
||||
>>> output = attn(x)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 196, 256])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
key_dim: int,
|
||||
num_heads: int = 8,
|
||||
attn_ratio: float = 4,
|
||||
resolution: tuple[int, int] = (14, 14),
|
||||
):
|
||||
"""
|
||||
Initialize the Attention module for multi-head attention with spatial awareness.
|
||||
|
||||
This module implements a multi-head attention mechanism with support for spatial awareness, applying
|
||||
attention biases based on spatial resolution. It includes trainable attention biases for each unique
|
||||
offset between spatial positions in the resolution grid.
|
||||
|
||||
Args:
|
||||
dim (int): The dimensionality of the input and output.
|
||||
key_dim (int): The dimensionality of the keys and queries.
|
||||
num_heads (int, optional): Number of attention heads.
|
||||
attn_ratio (float, optional): Attention ratio, affecting the dimensions of the value vectors.
|
||||
resolution (tuple[int, int], optional): Spatial resolution of the input feature map.
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
assert isinstance(resolution, tuple) and len(resolution) == 2, "'resolution' argument not tuple of length 2"
|
||||
self.num_heads = num_heads
|
||||
self.scale = key_dim**-0.5
|
||||
self.key_dim = key_dim
|
||||
self.nh_kd = nh_kd = key_dim * num_heads
|
||||
self.d = int(attn_ratio * key_dim)
|
||||
self.dh = int(attn_ratio * key_dim) * num_heads
|
||||
self.attn_ratio = attn_ratio
|
||||
h = self.dh + nh_kd * 2
|
||||
|
||||
self.norm = nn.LayerNorm(dim)
|
||||
self.qkv = nn.Linear(dim, h)
|
||||
self.proj = nn.Linear(self.dh, dim)
|
||||
|
||||
points = list(itertools.product(range(resolution[0]), range(resolution[1])))
|
||||
N = len(points)
|
||||
attention_offsets = {}
|
||||
idxs = []
|
||||
for p1 in points:
|
||||
for p2 in points:
|
||||
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
|
||||
if offset not in attention_offsets:
|
||||
attention_offsets[offset] = len(attention_offsets)
|
||||
idxs.append(attention_offsets[offset])
|
||||
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
|
||||
self.register_buffer("attention_bias_idxs", torch.LongTensor(idxs).view(N, N), persistent=False)
|
||||
|
||||
@torch.no_grad()
|
||||
def train(self, mode: bool = True):
|
||||
"""Set the module in training mode and handle the 'ab' attribute for cached attention biases."""
|
||||
super().train(mode)
|
||||
if mode and hasattr(self, "ab"):
|
||||
del self.ab
|
||||
else:
|
||||
self.ab = self.attention_biases[:, self.attention_bias_idxs]
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Apply multi-head attention with spatial awareness and trainable attention biases."""
|
||||
B, N, _ = x.shape # B, N, C
|
||||
|
||||
# Normalization
|
||||
x = self.norm(x)
|
||||
|
||||
qkv = self.qkv(x)
|
||||
# (B, N, num_heads, d)
|
||||
q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3)
|
||||
# (B, num_heads, N, d)
|
||||
q = q.permute(0, 2, 1, 3)
|
||||
k = k.permute(0, 2, 1, 3)
|
||||
v = v.permute(0, 2, 1, 3)
|
||||
self.ab = self.ab.to(self.attention_biases.device)
|
||||
|
||||
attn = (q @ k.transpose(-2, -1)) * self.scale + (
|
||||
self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab
|
||||
)
|
||||
attn = attn.softmax(dim=-1)
|
||||
x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
|
||||
return self.proj(x)
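
# NOTE (illustrative sketch, not part of this file): the bias table stores one learnable value per
# unique absolute (dx, dy) offset, and `attention_bias_idxs` maps every position pair onto it.
def _demo_attention_bias_sharing():
    """Hypothetical quick check of how attention biases are shared across spatial offsets."""
    attn = Attention(dim=64, key_dim=16, num_heads=4, resolution=(2, 2))
    # A 2x2 grid has 4 positions -> 16 position pairs, but only 4 unique absolute offsets
    assert attn.attention_biases.shape == (4, 4)  # (num_heads, num_unique_offsets)
    assert attn.attention_bias_idxs.shape == (4, 4)  # (N, N) index map into the bias table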
|
||||
|
||||
|
||||
class TinyViTBlock(nn.Module):
|
||||
"""
|
||||
TinyViT Block that applies self-attention and a local convolution to the input.
|
||||
|
||||
This block is a key component of the TinyViT architecture, combining self-attention mechanisms with
|
||||
local convolutions to process input features efficiently. It supports windowed attention for
|
||||
computational efficiency and includes residual connections.
|
||||
|
||||
Attributes:
|
||||
dim (int): The dimensionality of the input and output.
|
||||
input_resolution (tuple[int, int]): Spatial resolution of the input feature map.
|
||||
num_heads (int): Number of attention heads.
|
||||
window_size (int): Size of the attention window.
|
||||
mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
|
||||
drop_path (nn.Module): Stochastic depth layer, identity function during inference.
|
||||
attn (Attention): Self-attention module.
|
||||
mlp (MLP): Multi-layer perceptron module.
|
||||
local_conv (Conv2d_BN): Depth-wise local convolution layer.
|
||||
|
||||
Examples:
|
||||
>>> input_tensor = torch.randn(1, 196, 192)
|
||||
>>> block = TinyViTBlock(dim=192, input_resolution=(14, 14), num_heads=3)
|
||||
>>> output = block(input_tensor)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 196, 192])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
input_resolution: tuple[int, int],
|
||||
num_heads: int,
|
||||
window_size: int = 7,
|
||||
mlp_ratio: float = 4.0,
|
||||
drop: float = 0.0,
|
||||
drop_path: float = 0.0,
|
||||
local_conv_size: int = 3,
|
||||
activation=nn.GELU,
|
||||
):
|
||||
"""
|
||||
Initialize a TinyViT block with self-attention and local convolution.
|
||||
|
||||
This block is a key component of the TinyViT architecture, combining self-attention mechanisms with
|
||||
local convolutions to process input features efficiently.
|
||||
|
||||
Args:
|
||||
dim (int): Dimensionality of the input and output features.
|
||||
input_resolution (tuple[int, int]): Spatial resolution of the input feature map (height, width).
|
||||
num_heads (int): Number of attention heads.
|
||||
window_size (int, optional): Size of the attention window. Must be greater than 0.
|
||||
mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension.
|
||||
drop (float, optional): Dropout rate.
|
||||
drop_path (float, optional): Stochastic depth rate.
|
||||
local_conv_size (int, optional): Kernel size of the local convolution.
|
||||
activation (nn.Module): Activation function for MLP.
|
||||
"""
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
self.num_heads = num_heads
|
||||
assert window_size > 0, "window_size must be greater than 0"
|
||||
self.window_size = window_size
|
||||
self.mlp_ratio = mlp_ratio
|
||||
|
||||
# NOTE: `DropPath` is needed only for training.
|
||||
# self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
||||
self.drop_path = nn.Identity()
|
||||
|
||||
assert dim % num_heads == 0, "dim must be divisible by num_heads"
|
||||
head_dim = dim // num_heads
|
||||
|
||||
window_resolution = (window_size, window_size)
|
||||
self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution)
|
||||
|
||||
mlp_hidden_dim = int(dim * mlp_ratio)
|
||||
mlp_activation = activation
|
||||
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, activation=mlp_activation, drop=drop)
|
||||
|
||||
pad = local_conv_size // 2
|
||||
self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Apply self-attention, local convolution, and MLP operations to the input tensor."""
|
||||
h, w = self.input_resolution
|
||||
b, hw, c = x.shape # batch, height*width, channels
|
||||
assert hw == h * w, "input feature has wrong size"
|
||||
res_x = x
|
||||
if h == self.window_size and w == self.window_size:
|
||||
x = self.attn(x)
|
||||
else:
|
||||
x = x.view(b, h, w, c)
|
||||
pad_b = (self.window_size - h % self.window_size) % self.window_size
|
||||
pad_r = (self.window_size - w % self.window_size) % self.window_size
|
||||
padding = pad_b > 0 or pad_r > 0
|
||||
if padding:
|
||||
x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))
|
||||
|
||||
pH, pW = h + pad_b, w + pad_r
|
||||
nH = pH // self.window_size
|
||||
nW = pW // self.window_size
|
||||
|
||||
# Window partition
|
||||
x = (
|
||||
x.view(b, nH, self.window_size, nW, self.window_size, c)
|
||||
.transpose(2, 3)
|
||||
.reshape(b * nH * nW, self.window_size * self.window_size, c)
|
||||
)
|
||||
x = self.attn(x)
|
||||
|
||||
# Window reverse
|
||||
x = x.view(b, nH, nW, self.window_size, self.window_size, c).transpose(2, 3).reshape(b, pH, pW, c)
|
||||
if padding:
|
||||
x = x[:, :h, :w].contiguous()
|
||||
|
||||
x = x.view(b, hw, c)
|
||||
|
||||
x = res_x + self.drop_path(x)
|
||||
x = x.transpose(1, 2).reshape(b, c, h, w)
|
||||
x = self.local_conv(x)
|
||||
x = x.view(b, c, hw).transpose(1, 2)
|
||||
|
||||
return x + self.drop_path(self.mlp(x))
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
"""
|
||||
Return a string representation of the TinyViTBlock's parameters.
|
||||
|
||||
This method provides a formatted string containing key information about the TinyViTBlock, including its
|
||||
dimension, input resolution, number of attention heads, window size, and MLP ratio.
|
||||
|
||||
Returns:
|
||||
(str): A formatted string containing the block's parameters.
|
||||
|
||||
Examples:
|
||||
>>> block = TinyViTBlock(dim=192, input_resolution=(14, 14), num_heads=3, window_size=7, mlp_ratio=4.0)
|
||||
>>> print(block.extra_repr())
|
||||
dim=192, input_resolution=(14, 14), num_heads=3, window_size=7, mlp_ratio=4.0
|
||||
"""
|
||||
return (
|
||||
f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
|
||||
f"window_size={self.window_size}, mlp_ratio={self.mlp_ratio}"
|
||||
)
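
# NOTE (illustrative sketch, not part of this file): the block pads, window-partitions, attends,
# and reverses the partition, so the (B, H*W, C) token shape is preserved end to end.
def _demo_tinyvit_block_shapes():
    """Hypothetical quick check that windowed attention round-trips the token shape."""
    block = TinyViTBlock(dim=96, input_resolution=(14, 14), num_heads=3, window_size=5)
    tokens = torch.randn(2, 14 * 14, 96)  # window_size=5 forces the padding branch (14 % 5 != 0)
    assert block(tokens).shape == tokens.shape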
|
||||
|
||||
|
||||
class BasicLayer(nn.Module):
|
||||
"""
|
||||
A basic TinyViT layer for one stage in a TinyViT architecture.
|
||||
|
||||
This class represents a single layer in the TinyViT model, consisting of multiple TinyViT blocks
|
||||
and an optional downsampling operation. It processes features at a specific resolution and
|
||||
dimensionality within the overall architecture.
|
||||
|
||||
Attributes:
|
||||
dim (int): The dimensionality of the input and output features.
|
||||
input_resolution (tuple[int, int]): Spatial resolution of the input feature map.
|
||||
depth (int): Number of TinyViT blocks in this layer.
|
||||
use_checkpoint (bool): Whether to use gradient checkpointing to save memory.
|
||||
blocks (nn.ModuleList): List of TinyViT blocks that make up this layer.
|
||||
downsample (nn.Module | None): Downsample layer at the end of the layer, if specified.
|
||||
|
||||
Examples:
|
||||
>>> input_tensor = torch.randn(1, 3136, 192)
|
||||
>>> layer = BasicLayer(dim=192, input_resolution=(56, 56), depth=2, num_heads=3, window_size=7)
|
||||
>>> output = layer(input_tensor)
|
||||
>>> print(output.shape)
|
||||
torch.Size([1, 784, 384])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
dim: int,
|
||||
input_resolution: tuple[int, int],
|
||||
depth: int,
|
||||
num_heads: int,
|
||||
window_size: int,
|
||||
mlp_ratio: float = 4.0,
|
||||
drop: float = 0.0,
|
||||
drop_path: float | list[float] = 0.0,
|
||||
downsample: nn.Module | None = None,
|
||||
use_checkpoint: bool = False,
|
||||
local_conv_size: int = 3,
|
||||
activation=nn.GELU,
|
||||
out_dim: int | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize a BasicLayer in the TinyViT architecture.
|
||||
|
||||
This layer consists of multiple TinyViT blocks and an optional downsampling operation. It is designed to
|
||||
process feature maps at a specific resolution and dimensionality within the TinyViT model.
|
||||
|
||||
Args:
|
||||
dim (int): Dimensionality of the input and output features.
|
||||
input_resolution (tuple[int, int]): Spatial resolution of the input feature map (height, width).
|
||||
depth (int): Number of TinyViT blocks in this layer.
|
||||
num_heads (int): Number of attention heads in each TinyViT block.
|
||||
window_size (int): Size of the local window for attention computation.
|
||||
mlp_ratio (float, optional): Ratio of MLP hidden dimension to embedding dimension.
|
||||
drop (float, optional): Dropout rate.
|
||||
drop_path (float | list[float], optional): Stochastic depth rate. Can be a float or a list of floats for each block.
|
||||
downsample (nn.Module | None, optional): Downsampling layer at the end of the layer. None to skip downsampling.
|
||||
use_checkpoint (bool, optional): Whether to use gradient checkpointing to save memory.
|
||||
local_conv_size (int, optional): Kernel size for the local convolution in each TinyViT block.
|
||||
activation (nn.Module): Activation function used in the MLP.
|
||||
out_dim (int | None, optional): Output dimension after downsampling. None means it will be the same as `dim`.
|
||||
"""
|
||||
super().__init__()
|
||||
self.dim = dim
|
||||
self.input_resolution = input_resolution
|
||||
self.depth = depth
|
||||
self.use_checkpoint = use_checkpoint
|
||||
|
||||
# Build blocks
|
||||
self.blocks = nn.ModuleList(
|
||||
[
|
||||
TinyViTBlock(
|
||||
dim=dim,
|
||||
input_resolution=input_resolution,
|
||||
num_heads=num_heads,
|
||||
window_size=window_size,
|
||||
mlp_ratio=mlp_ratio,
|
||||
drop=drop,
|
||||
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
|
||||
local_conv_size=local_conv_size,
|
||||
activation=activation,
|
||||
)
|
||||
for i in range(depth)
|
||||
]
|
||||
)
|
||||
|
||||
# Patch merging layer
|
||||
self.downsample = (
|
||||
None
|
||||
if downsample is None
|
||||
else downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation)
|
||||
)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Process input through TinyViT blocks and optional downsampling."""
|
||||
for blk in self.blocks:
|
||||
x = torch.utils.checkpoint.checkpoint(blk, x) if self.use_checkpoint else blk(x)  # checkpointing trades memory for recompute
|
||||
return x if self.downsample is None else self.downsample(x)
|
||||
|
||||
def extra_repr(self) -> str:
|
||||
"""Return a string with the layer's parameters for printing."""
|
||||
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
||||
|
||||
|
||||
class TinyViT(nn.Module):
|
||||
"""
|
||||
TinyViT: A compact vision transformer architecture for efficient image classification and feature extraction.
|
||||
|
||||
This class implements the TinyViT model, which combines elements of vision transformers and convolutional
|
||||
neural networks for improved efficiency and performance on vision tasks. It features hierarchical processing
|
||||
with patch embedding, multiple stages of attention and convolution blocks, and a feature refinement neck.
|
||||
|
||||
Attributes:
|
||||
img_size (int): Input image size.
|
||||
num_classes (int): Number of classification classes.
|
||||
depths (tuple[int, int, int, int]): Number of blocks in each stage.
|
||||
num_layers (int): Total number of layers in the network.
|
||||
mlp_ratio (float): Ratio of MLP hidden dimension to embedding dimension.
|
||||
patch_embed (PatchEmbed): Module for patch embedding.
|
||||
patches_resolution (tuple[int, int]): Resolution of embedded patches.
|
||||
layers (nn.ModuleList): List of network layers.
|
||||
norm_head (nn.LayerNorm): Layer normalization for the classifier head.
|
||||
head (nn.Linear): Linear layer for final classification.
|
||||
neck (nn.Sequential): Neck module for feature refinement.
|
||||
|
||||
Examples:
|
||||
>>> model = TinyViT(img_size=224, num_classes=1000)
|
||||
>>> x = torch.randn(1, 3, 224, 224)
|
||||
>>> features = model.forward_features(x)
|
||||
>>> print(features.shape)
|
||||
torch.Size([1, 256, 56, 56])
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
img_size: int = 224,
|
||||
in_chans: int = 3,
|
||||
num_classes: int = 1000,
|
||||
embed_dims: tuple[int, int, int, int] = (96, 192, 384, 768),
|
||||
depths: tuple[int, int, int, int] = (2, 2, 6, 2),
|
||||
num_heads: tuple[int, int, int, int] = (3, 6, 12, 24),
|
||||
window_sizes: tuple[int, int, int, int] = (7, 7, 14, 7),
|
||||
mlp_ratio: float = 4.0,
|
||||
drop_rate: float = 0.0,
|
||||
drop_path_rate: float = 0.1,
|
||||
use_checkpoint: bool = False,
|
||||
mbconv_expand_ratio: float = 4.0,
|
||||
local_conv_size: int = 3,
|
||||
layer_lr_decay: float = 1.0,
|
||||
):
|
||||
"""
|
||||
Initialize the TinyViT model.
|
||||
|
||||
This constructor sets up the TinyViT architecture, including patch embedding, multiple layers of
|
||||
attention and convolution blocks, and a classification head.
|
||||
|
||||
Args:
|
||||
img_size (int, optional): Size of the input image.
|
||||
in_chans (int, optional): Number of input channels.
|
||||
num_classes (int, optional): Number of classes for classification.
|
||||
embed_dims (tuple[int, int, int, int], optional): Embedding dimensions for each stage.
|
||||
depths (tuple[int, int, int, int], optional): Number of blocks in each stage.
|
||||
num_heads (tuple[int, int, int, int], optional): Number of attention heads in each stage.
|
||||
window_sizes (tuple[int, int, int, int], optional): Window sizes for each stage.
|
||||
mlp_ratio (float, optional): Ratio of MLP hidden dim to embedding dim.
|
||||
drop_rate (float, optional): Dropout rate.
|
||||
drop_path_rate (float, optional): Stochastic depth rate.
|
||||
use_checkpoint (bool, optional): Whether to use checkpointing to save memory.
|
||||
mbconv_expand_ratio (float, optional): Expansion ratio for MBConv layer.
|
||||
local_conv_size (int, optional): Kernel size for local convolutions.
|
||||
layer_lr_decay (float, optional): Layer-wise learning rate decay factor.
|
||||
"""
|
||||
super().__init__()
|
||||
self.img_size = img_size
|
||||
self.num_classes = num_classes
|
||||
self.depths = depths
|
||||
self.num_layers = len(depths)
|
||||
self.mlp_ratio = mlp_ratio
|
||||
|
||||
activation = nn.GELU
|
||||
|
||||
self.patch_embed = PatchEmbed(
|
||||
in_chans=in_chans, embed_dim=embed_dims[0], resolution=img_size, activation=activation
|
||||
)
|
||||
|
||||
patches_resolution = self.patch_embed.patches_resolution
|
||||
self.patches_resolution = patches_resolution
|
||||
|
||||
# Stochastic depth
|
||||
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
|
||||
|
||||
# Build layers
|
||||
self.layers = nn.ModuleList()
|
||||
for i_layer in range(self.num_layers):
|
||||
kwargs = dict(
|
||||
dim=embed_dims[i_layer],
|
||||
input_resolution=(
|
||||
patches_resolution[0] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
|
||||
patches_resolution[1] // (2 ** (i_layer - 1 if i_layer == 3 else i_layer)),
|
||||
),
|
||||
# input_resolution=(patches_resolution[0] // (2 ** i_layer),
|
||||
# patches_resolution[1] // (2 ** i_layer)),
|
||||
depth=depths[i_layer],
|
||||
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
|
||||
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
|
||||
use_checkpoint=use_checkpoint,
|
||||
out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)],
|
||||
activation=activation,
|
||||
)
|
||||
if i_layer == 0:
|
||||
layer = ConvLayer(conv_expand_ratio=mbconv_expand_ratio, **kwargs)
|
||||
else:
|
||||
layer = BasicLayer(
|
||||
num_heads=num_heads[i_layer],
|
||||
window_size=window_sizes[i_layer],
|
||||
mlp_ratio=self.mlp_ratio,
|
||||
drop=drop_rate,
|
||||
local_conv_size=local_conv_size,
|
||||
**kwargs,
|
||||
)
|
||||
self.layers.append(layer)
|
||||
|
||||
# Classifier head
|
||||
self.norm_head = nn.LayerNorm(embed_dims[-1])
|
||||
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
|
||||
|
||||
# Init weights
|
||||
self.apply(self._init_weights)
|
||||
self.set_layer_lr_decay(layer_lr_decay)
|
||||
self.neck = nn.Sequential(
|
||||
nn.Conv2d(
|
||||
embed_dims[-1],
|
||||
256,
|
||||
kernel_size=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(256),
|
||||
nn.Conv2d(
|
||||
256,
|
||||
256,
|
||||
kernel_size=3,
|
||||
padding=1,
|
||||
bias=False,
|
||||
),
|
||||
LayerNorm2d(256),
|
||||
)
|
||||
|
||||
def set_layer_lr_decay(self, layer_lr_decay: float):
|
||||
"""Set layer-wise learning rate decay for the TinyViT model based on depth."""
|
||||
decay_rate = layer_lr_decay
|
||||
|
||||
# Layers -> blocks (depth)
|
||||
depth = sum(self.depths)
|
||||
lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
|
||||
|
||||
def _set_lr_scale(m, scale):
|
||||
"""Set the learning rate scale for each layer in the model based on the layer's depth."""
|
||||
for p in m.parameters():
|
||||
p.lr_scale = scale
|
||||
|
||||
self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
|
||||
i = 0
|
||||
for layer in self.layers:
|
||||
for block in layer.blocks:
|
||||
block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
|
||||
i += 1
|
||||
if layer.downsample is not None:
|
||||
layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1]))
|
||||
assert i == depth
|
||||
for m in {self.norm_head, self.head}:
|
||||
m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
|
||||
|
||||
for k, p in self.named_parameters():
|
||||
p.param_name = k
|
||||
|
||||
def _check_lr_scale(m):
|
||||
"""Check if the learning rate scale attribute is present in module's parameters."""
|
||||
for p in m.parameters():
|
||||
assert hasattr(p, "lr_scale"), p.param_name
|
||||
|
||||
self.apply(_check_lr_scale)
|
||||
|
||||
@staticmethod
|
||||
def _init_weights(m):
|
||||
"""Initialize weights for linear and normalization layers in the TinyViT model."""
|
||||
if isinstance(m, nn.Linear):
|
||||
# NOTE: This initialization is needed only for training.
|
||||
# trunc_normal_(m.weight, std=.02)
|
||||
if m.bias is not None:
|
||||
nn.init.constant_(m.bias, 0)
|
||||
elif isinstance(m, nn.LayerNorm):
|
||||
nn.init.constant_(m.bias, 0)
|
||||
nn.init.constant_(m.weight, 1.0)
|
||||
|
||||
@torch.jit.ignore
|
||||
def no_weight_decay_keywords(self):
|
||||
"""Return a set of keywords for parameters that should not use weight decay."""
|
||||
return {"attention_biases"}
|
||||
|
||||
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Process input through feature extraction layers, returning spatial features."""
|
||||
x = self.patch_embed(x) # x input is (N, C, H, W)
|
||||
|
||||
x = self.layers[0](x)
|
||||
start_i = 1
|
||||
|
||||
for i in range(start_i, len(self.layers)):
|
||||
layer = self.layers[i]
|
||||
x = layer(x)
|
||||
batch, _, channel = x.shape
|
||||
x = x.view(batch, self.patches_resolution[0] // 4, self.patches_resolution[1] // 4, channel)
|
||||
x = x.permute(0, 3, 1, 2)
|
||||
return self.neck(x)
|
||||
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
"""Perform the forward pass through the TinyViT model, extracting features from the input image."""
|
||||
return self.forward_features(x)
|
||||
|
||||
def set_imgsz(self, imgsz: list[int] = [1024, 1024]):
|
||||
"""Set image size to make model compatible with different image sizes."""
|
||||
imgsz = [s // 4 for s in imgsz]
|
||||
self.patches_resolution = imgsz
|
||||
for i, layer in enumerate(self.layers):
|
||||
input_resolution = (
|
||||
imgsz[0] // (2 ** (i - 1 if i == 3 else i)),
|
||||
imgsz[1] // (2 ** (i - 1 if i == 3 else i)),
|
||||
)
|
||||
layer.input_resolution = input_resolution
|
||||
if layer.downsample is not None:
|
||||
layer.downsample.input_resolution = input_resolution
|
||||
if isinstance(layer, BasicLayer):
|
||||
for b in layer.blocks:
|
||||
b.input_resolution = input_resolution
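
# NOTE (illustrative sketch, not part of this file): assembling a small TinyViT backbone and using
# it as a SAM-style image encoder. The configuration values below are illustrative assumptions.
def _demo_tinyvit_backbone():
    """Hypothetical end-to-end check of the TinyViT feature extractor."""
    model = TinyViT(
        img_size=256,
        embed_dims=(64, 128, 160, 320),
        depths=(2, 2, 6, 2),
        num_heads=(2, 4, 5, 10),
        window_sizes=(7, 7, 14, 7),
    )
    image = torch.randn(1, 3, 256, 256)
    features = model(image)  # the neck projects the final stage to 256 channels
    assert features.shape == (1, 256, 16, 16)  # (B, 256, img_size / 16, img_size / 16)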
354
ultralytics/models/sam/modules/transformer.py
Normal file
@@ -0,0 +1,354 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from __future__ import annotations

import math

import torch
from torch import Tensor, nn

from ultralytics.nn.modules import MLPBlock


class TwoWayTransformer(nn.Module):
    """
    A Two-Way Transformer module for simultaneous attention to image and query points.

    This class implements a specialized transformer decoder that attends to an input image using queries with
    supplied positional embeddings. It's useful for tasks like object detection, image segmentation, and point
    cloud processing.

    Attributes:
        depth (int): Number of layers in the transformer.
        embedding_dim (int): Channel dimension for input embeddings.
        num_heads (int): Number of heads for multihead attention.
        mlp_dim (int): Internal channel dimension for the MLP block.
        layers (nn.ModuleList): List of TwoWayAttentionBlock layers composing the transformer.
        final_attn_token_to_image (Attention): Final attention layer from queries to image.
        norm_final_attn (nn.LayerNorm): Layer normalization applied to final queries.

    Methods:
        forward: Process image and point embeddings through the transformer.

    Examples:
        >>> transformer = TwoWayTransformer(depth=6, embedding_dim=256, num_heads=8, mlp_dim=2048)
        >>> image_embedding = torch.randn(1, 256, 32, 32)
        >>> image_pe = torch.randn(1, 256, 32, 32)
        >>> point_embedding = torch.randn(1, 100, 256)
        >>> output_queries, output_image = transformer(image_embedding, image_pe, point_embedding)
        >>> print(output_queries.shape, output_image.shape)
    """

    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        Initialize a Two-Way Transformer for simultaneous attention to image and query points.

        Args:
            depth (int): Number of layers in the transformer.
            embedding_dim (int): Channel dimension for input embeddings.
            num_heads (int): Number of heads for multihead attention. Must divide embedding_dim.
            mlp_dim (int): Internal channel dimension for the MLP block.
            activation (Type[nn.Module], optional): Activation function to use in the MLP block.
            attention_downsample_rate (int, optional): Downsampling rate for attention mechanism.
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()

        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),
                )
            )

        self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
        self.norm_final_attn = nn.LayerNorm(embedding_dim)

    def forward(
        self,
        image_embedding: torch.Tensor,
        image_pe: torch.Tensor,
        point_embedding: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Process image and point embeddings through the Two-Way Transformer.

        Args:
            image_embedding (torch.Tensor): Image to attend to, with shape (B, embedding_dim, H, W).
            image_pe (torch.Tensor): Positional encoding to add to the image, with same shape as image_embedding.
            point_embedding (torch.Tensor): Embedding to add to query points, with shape (B, N_points, embedding_dim).

        Returns:
            queries (torch.Tensor): Processed point embeddings with shape (B, N_points, embedding_dim).
            keys (torch.Tensor): Processed image embeddings with shape (B, H*W, embedding_dim).
        """
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)

        # Prepare queries
        queries = point_embedding
        keys = image_embedding

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )

        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys


class TwoWayAttentionBlock(nn.Module):
    """
    A two-way attention block for simultaneous attention to image and query points.

    This class implements a specialized transformer block with four main layers: self-attention on sparse inputs,
    cross-attention of sparse inputs to dense inputs, MLP block on sparse inputs, and cross-attention of dense
    inputs to sparse inputs.

    Attributes:
        self_attn (Attention): Self-attention layer for queries.
        norm1 (nn.LayerNorm): Layer normalization after self-attention.
        cross_attn_token_to_image (Attention): Cross-attention layer from queries to keys.
        norm2 (nn.LayerNorm): Layer normalization after token-to-image attention.
        mlp (MLPBlock): MLP block for transforming query embeddings.
        norm3 (nn.LayerNorm): Layer normalization after MLP block.
        norm4 (nn.LayerNorm): Layer normalization after image-to-token attention.
        cross_attn_image_to_token (Attention): Cross-attention layer from keys to queries.
        skip_first_layer_pe (bool): Whether to skip positional encoding in the first layer.

    Methods:
        forward: Apply self-attention and cross-attention to queries and keys.

    Examples:
        >>> embedding_dim, num_heads = 256, 8
        >>> block = TwoWayAttentionBlock(embedding_dim, num_heads)
        >>> queries = torch.randn(1, 100, embedding_dim)
        >>> keys = torch.randn(1, 1000, embedding_dim)
        >>> query_pe = torch.randn(1, 100, embedding_dim)
        >>> key_pe = torch.randn(1, 1000, embedding_dim)
        >>> processed_queries, processed_keys = block(queries, keys, query_pe, key_pe)
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        Initialize a TwoWayAttentionBlock for simultaneous attention to image and query points.

        This block implements a specialized transformer layer with four main components: self-attention on sparse
        inputs, cross-attention of sparse inputs to dense inputs, MLP block on sparse inputs, and cross-attention
        of dense inputs to sparse inputs.

        Args:
            embedding_dim (int): Channel dimension of the embeddings.
            num_heads (int): Number of attention heads in the attention layers.
            mlp_dim (int, optional): Hidden dimension of the MLP block.
            activation (Type[nn.Module], optional): Activation function for the MLP block.
            attention_downsample_rate (int, optional): Downsampling rate for the attention mechanism.
            skip_first_layer_pe (bool, optional): Whether to skip positional encoding in the first layer.
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate)

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self, queries: torch.Tensor, keys: torch.Tensor, query_pe: torch.Tensor, key_pe: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Apply two-way attention to process query and key embeddings in a transformer block.

        Args:
            queries (torch.Tensor): Query embeddings with shape (B, N_queries, embedding_dim).
            keys (torch.Tensor): Key embeddings with shape (B, N_keys, embedding_dim).
            query_pe (torch.Tensor): Positional encodings for queries with same shape as queries.
            key_pe (torch.Tensor): Positional encodings for keys with same shape as keys.

        Returns:
            queries (torch.Tensor): Processed query embeddings with shape (B, N_queries, embedding_dim).
            keys (torch.Tensor): Processed key embeddings with shape (B, N_keys, embedding_dim).
        """
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, image embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys


class Attention(nn.Module):
    """
    An attention layer with downscaling capability for embedding size after projection.

    This class implements a multi-head attention mechanism with the option to downsample the internal
    dimension of queries, keys, and values.

    Attributes:
        embedding_dim (int): Dimensionality of input embeddings.
        kv_in_dim (int): Dimensionality of key and value inputs.
        internal_dim (int): Internal dimension after downsampling.
        num_heads (int): Number of attention heads.
        q_proj (nn.Linear): Linear projection for queries.
        k_proj (nn.Linear): Linear projection for keys.
        v_proj (nn.Linear): Linear projection for values.
        out_proj (nn.Linear): Linear projection for output.

    Methods:
        _separate_heads: Separate input tensor into attention heads.
        _recombine_heads: Recombine separated attention heads.
        forward: Compute attention output for given query, key, and value tensors.

    Examples:
        >>> attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
        >>> q = torch.randn(1, 100, 256)
        >>> k = v = torch.randn(1, 50, 256)
        >>> output = attn(q, k, v)
        >>> print(output.shape)
        torch.Size([1, 100, 256])
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
        kv_in_dim: int | None = None,
    ) -> None:
        """
        Initialize the Attention module with specified dimensions and settings.

        Args:
            embedding_dim (int): Dimensionality of input embeddings.
            num_heads (int): Number of attention heads.
            downsample_rate (int, optional): Factor by which internal dimensions are downsampled.
            kv_in_dim (int | None, optional): Dimensionality of key and value inputs. If None, uses embedding_dim.

        Raises:
            AssertionError: If num_heads does not evenly divide the internal dim (embedding_dim / downsample_rate).
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

    @staticmethod
    def _separate_heads(x: torch.Tensor, num_heads: int) -> torch.Tensor:
        """Separate the input tensor into the specified number of attention heads."""
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    @staticmethod
    def _recombine_heads(x: Tensor) -> Tensor:
        """Recombine separated attention heads into a single tensor."""
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        """
        Apply multi-head attention to query, key, and value tensors with optional downsampling.

        Args:
            q (torch.Tensor): Query tensor with shape (B, N_q, embedding_dim).
            k (torch.Tensor): Key tensor with shape (B, N_k, embedding_dim).
            v (torch.Tensor): Value tensor with shape (B, N_k, embedding_dim).

        Returns:
            (torch.Tensor): Output tensor after attention with shape (B, N_q, embedding_dim).
        """
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Attention
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        return self.out_proj(out)
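
# NOTE (illustrative sketch, not part of this file): the attention math above matches
# torch.nn.functional.scaled_dot_product_attention applied to the already projected per-head tensors.
def _demo_sdpa_equivalence():
    """Hypothetical numerical check against PyTorch's fused attention kernel."""
    attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
    q, kv = torch.randn(1, 100, 256), torch.randn(1, 50, 256)
    out = attn(q, kv, kv)
    qh = attn._separate_heads(attn.q_proj(q), attn.num_heads)
    kh = attn._separate_heads(attn.k_proj(kv), attn.num_heads)
    vh = attn._separate_heads(attn.v_proj(kv), attn.num_heads)
    ref = attn.out_proj(attn._recombine_heads(torch.nn.functional.scaled_dot_product_attention(qh, kh, vh)))
    assert torch.allclose(out, ref, atol=1e-5)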
388
ultralytics/models/sam/modules/utils.py
Normal file
@@ -0,0 +1,388 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from __future__ import annotations

from typing import Any

import torch
import torch.nn.functional as F


def select_closest_cond_frames(frame_idx: int, cond_frame_outputs: dict[int, Any], max_cond_frame_num: int):
    """
    Select the closest conditioning frames to a given frame index.

    Args:
        frame_idx (int): Current frame index.
        cond_frame_outputs (dict[int, Any]): Dictionary of conditioning frame outputs keyed by frame indices.
        max_cond_frame_num (int): Maximum number of conditioning frames to select.

    Returns:
        selected_outputs (dict[int, Any]): Selected items from cond_frame_outputs.
        unselected_outputs (dict[int, Any]): Items not selected from cond_frame_outputs.

    Examples:
        >>> frame_idx = 5
        >>> cond_frame_outputs = {1: "a", 3: "b", 7: "c", 9: "d"}
        >>> max_cond_frame_num = 2
        >>> selected, unselected = select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num)
        >>> print(selected)
        {3: 'b', 7: 'c'}
        >>> print(unselected)
        {1: 'a', 9: 'd'}
    """
    if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
        selected_outputs = cond_frame_outputs
        unselected_outputs = {}
    else:
        assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
        selected_outputs = {}

        # The closest conditioning frame before `frame_idx` (if any)
        idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
        if idx_before is not None:
            selected_outputs[idx_before] = cond_frame_outputs[idx_before]

        # The closest conditioning frame after `frame_idx` (if any)
        idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
        if idx_after is not None:
            selected_outputs[idx_after] = cond_frame_outputs[idx_after]

        # Add other temporally closest conditioning frames until reaching a total
        # of `max_cond_frame_num` conditioning frames.
        num_remain = max_cond_frame_num - len(selected_outputs)
        inds_remain = sorted(
            (t for t in cond_frame_outputs if t not in selected_outputs),
            key=lambda x: abs(x - frame_idx),
        )[:num_remain]
        selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
        unselected_outputs = {t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs}

    return selected_outputs, unselected_outputs


def get_1d_sine_pe(pos_inds: torch.Tensor, dim: int, temperature: float = 10000):
    """
    Generate 1D sinusoidal positional embeddings for given positions and dimensions.

    Args:
        pos_inds (torch.Tensor): Position indices for which to generate embeddings.
        dim (int): Dimension of the positional embeddings. Should be an even number.
        temperature (float, optional): Scaling factor for the frequency of the sinusoidal functions.

    Returns:
        (torch.Tensor): Sinusoidal positional embeddings with shape (*pos_inds.shape, dim).

    Examples:
        >>> pos = torch.tensor([0, 1, 2, 3])
        >>> embeddings = get_1d_sine_pe(pos, 128)
        >>> embeddings.shape
        torch.Size([4, 128])
    """
    pe_dim = dim // 2
    dim_t = torch.arange(pe_dim, dtype=pos_inds.dtype, device=pos_inds.device)
    dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)

    pos_embed = pos_inds.unsqueeze(-1) / dim_t
    pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
    return pos_embed
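
# Illustrative check (not part of the module): the first half of each embedding is the sine branch
# and the second half the cosine branch, so position 0 maps to all zeros followed by all ones.
# >>> pe = get_1d_sine_pe(torch.tensor([0.0]), dim=8)
# >>> pe[0, :4]  # sin(0) terms
# tensor([0., 0., 0., 0.])
# >>> pe[0, 4:]  # cos(0) terms
# tensor([1., 1., 1., 1.])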


def init_t_xy(end_x: int, end_y: int):
    """
    Initialize 1D and 2D coordinate tensors for a grid of specified dimensions.

    This function creates coordinate tensors for a grid with dimensions end_x × end_y. It generates a
    linear index tensor and corresponding x and y coordinate tensors.

    Args:
        end_x (int): Width of the grid (number of columns).
        end_y (int): Height of the grid (number of rows).

    Returns:
        t_x (torch.Tensor): X-coordinates for each position, with shape (end_x * end_y).
        t_y (torch.Tensor): Y-coordinates for each position, with shape (end_x * end_y).

    Examples:
        >>> t_x, t_y = init_t_xy(3, 2)
        >>> print(t_x)
        tensor([0., 1., 2., 0., 1., 2.])
        >>> print(t_y)
        tensor([0., 0., 0., 1., 1., 1.])
    """
    t = torch.arange(end_x * end_y, dtype=torch.float32)
    t_x = (t % end_x).float()
    t_y = torch.div(t, end_x, rounding_mode="floor").float()
    return t_x, t_y


def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
    """
    Compute axial complex exponential positional encodings for 2D spatial positions in a grid.

    This function generates complex exponential positional encodings for a 2D grid of spatial positions,
    using separate frequency components for the x and y dimensions.

    Args:
        dim (int): Dimension of the positional encoding.
        end_x (int): Width of the 2D grid.
        end_y (int): Height of the 2D grid.
        theta (float, optional): Scaling factor for frequency computation.

    Returns:
        (torch.Tensor): Complex exponential positional encodings with shape (end_x * end_y, dim // 2).

    Examples:
        >>> dim, end_x, end_y = 128, 8, 8
        >>> freqs_cis = compute_axial_cis(dim, end_x, end_y)
        >>> freqs_cis.shape
        torch.Size([64, 64])
    """
    freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
    freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))

    t_x, t_y = init_t_xy(end_x, end_y)
    freqs_x = torch.outer(t_x, freqs_x)
    freqs_y = torch.outer(t_y, freqs_y)
    freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
    freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
    return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
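
# Illustrative check (not part of the module): every entry is a unit-magnitude complex number
# e^{i*theta}, and for a 4x4 grid with dim=64 there is one row per grid position and
# dim // 2 = 32 complex entries per row.
# >>> cis = compute_axial_cis(dim=64, end_x=4, end_y=4)
# >>> cis.shape, cis.dtype
# (torch.Size([16, 32]), torch.complex64)
# >>> torch.allclose(cis.abs(), torch.ones_like(cis.abs()))
# True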


def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
    """
    Reshape frequency tensor for broadcasting with input tensor.

    Reshapes a frequency tensor to ensure dimensional compatibility for broadcasting with an input tensor.
    This function is typically used in positional encoding operations.

    Args:
        freqs_cis (torch.Tensor): Frequency tensor with shape matching the last two dimensions of x.
        x (torch.Tensor): Input tensor to broadcast with.

    Returns:
        (torch.Tensor): Reshaped frequency tensor ready for broadcasting with the input tensor.

    Raises:
        AssertionError: If the shape of freqs_cis doesn't match the last two dimensions of x.
    """
    ndim = x.ndim
    assert 0 <= 1 < ndim
    assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
    shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
    return freqs_cis.view(*shape)


def apply_rotary_enc(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
    repeat_freqs_k: bool = False,
):
    """
    Apply rotary positional encoding to query and key tensors.

    This function applies rotary positional encoding (RoPE) to query and key tensors using complex-valued
    frequency components. RoPE is a technique that injects relative position information into self-attention
    mechanisms.

    Args:
        xq (torch.Tensor): Query tensor to encode with positional information.
        xk (torch.Tensor): Key tensor to encode with positional information.
        freqs_cis (torch.Tensor): Complex-valued frequency components for rotary encoding with shape matching
            the last two dimensions of xq.
        repeat_freqs_k (bool, optional): Whether to repeat frequency components along the sequence length
            dimension to match the key sequence length.

    Returns:
        xq_out (torch.Tensor): Query tensor with rotary positional encoding applied.
        xk_out (torch.Tensor): Key tensor with rotary positional encoding applied, or the original xk if xk is empty.

    Examples:
        >>> import torch
        >>> xq = torch.randn(2, 8, 16, 64)  # [batch, heads, seq_len, dim]
        >>> xk = torch.randn(2, 8, 16, 64)
        >>> freqs_cis = compute_axial_cis(64, 4, 4)  # For a 4x4 spatial grid with dim=64
        >>> q_encoded, k_encoded = apply_rotary_enc(xq, xk, freqs_cis)
    """
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None
    freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
    if xk_ is None:
        # No keys to rotate, due to dropout
        return xq_out.type_as(xq).to(xq.device), xk
    # Repeat freqs along seq_len dim to match k seq_len
    if repeat_freqs_k:
        r = xk_.shape[-2] // xq_.shape[-2]
        freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
    return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
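
# Usage sketch (illustrative only, not part of the module): rotate q/k for an 8x8 token grid before
# scaled dot-product attention. The head dim (64), grid size, and the use of
# F.scaled_dot_product_attention (PyTorch 2.0+) are assumptions made for this example.
# >>> q = torch.randn(1, 2, 64, 64)  # [batch, heads, tokens (8*8), head_dim]
# >>> k = torch.randn(1, 2, 64, 64)
# >>> v = torch.randn(1, 2, 64, 64)
# >>> cis = compute_axial_cis(dim=64, end_x=8, end_y=8)  # shape (64, 32), one row per token
# >>> q_rot, k_rot = apply_rotary_enc(q, k, cis)
# >>> out = F.scaled_dot_product_attention(q_rot, k_rot, v)
# >>> out.shape
# torch.Size([1, 2, 64, 64])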


def window_partition(x: torch.Tensor, window_size: int):
    """
    Partition input tensor into non-overlapping windows with padding if needed.

    Args:
        x (torch.Tensor): Input tensor with shape (B, H, W, C).
        window_size (int): Size of each window.

    Returns:
        windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
        padded_h_w (tuple[int, int]): Padded height and width before partition.

    Examples:
        >>> x = torch.randn(1, 16, 16, 3)
        >>> windows, (Hp, Wp) = window_partition(x, window_size=4)
        >>> print(windows.shape, Hp, Wp)
        torch.Size([16, 4, 4, 3]) 16 16
    """
    B, H, W, C = x.shape

    pad_h = (window_size - H % window_size) % window_size
    pad_w = (window_size - W % window_size) % window_size
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows, (Hp, Wp)


def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]):
    """
    Unpartition windowed sequences into original sequences and remove padding.

    This function reverses the windowing process, reconstructing the original input from windowed segments
    and removing any padding that was added during the windowing process.

    Args:
        windows (torch.Tensor): Input tensor of windowed sequences with shape (B * num_windows, window_size,
            window_size, C), where B is the batch size, num_windows is the number of windows, window_size is
            the size of each window, and C is the number of channels.
        window_size (int): Size of each window.
        pad_hw (tuple[int, int]): Padded height and width (Hp, Wp) of the input before windowing.
        hw (tuple[int, int]): Original height and width (H, W) of the input before padding and windowing.

    Returns:
        (torch.Tensor): Unpartitioned sequences with shape (B, H, W, C), where B is the batch size, H and W
            are the original height and width, and C is the number of channels.

    Examples:
        >>> windows = torch.rand(4, 8, 8, 64)  # 4 windows of size 8x8 covering a padded 16x16 input
        >>> pad_hw = (16, 16)  # Padded height and width
        >>> hw = (15, 14)  # Original height and width
        >>> x = window_unpartition(windows, window_size=8, pad_hw=pad_hw, hw=hw)
        >>> print(x.shape)
        torch.Size([1, 15, 14, 64])
    """
    Hp, Wp = pad_hw
    H, W = hw
    B = windows.shape[0] // (Hp * Wp // window_size // window_size)
    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        x = x[:, :H, :W, :].contiguous()
    return x
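
# Illustrative round trip (not part of the module): partitioning and then unpartitioning with the
# returned padded size recovers the original tensor exactly, since the zero padding is cropped away.
# >>> x = torch.randn(2, 15, 14, 32)
# >>> windows, pad_hw = window_partition(x, window_size=8)
# >>> x_rec = window_unpartition(windows, window_size=8, pad_hw=pad_hw, hw=(15, 14))
# >>> torch.equal(x, x_rec)
# True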


def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Extract relative positional embeddings based on query and key sizes.

    Args:
        q_size (int): Size of the query.
        k_size (int): Size of the key.
        rel_pos (torch.Tensor): Relative position embeddings with shape (L, C), where L is the maximum relative
            distance and C is the embedding dimension.

    Returns:
        (torch.Tensor): Extracted positional embeddings according to relative positions, with shape
            (q_size, k_size, C).

    Examples:
        >>> q_size, k_size = 8, 16
        >>> rel_pos = torch.randn(31, 64)  # 31 = 2 * max(8, 16) - 1
        >>> extracted_pos = get_rel_pos(q_size, k_size, rel_pos)
        >>> print(extracted_pos.shape)
        torch.Size([8, 16, 64])
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    # Interpolate rel pos if needed.
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel pos.
        rel_pos_resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
    else:
        rel_pos_resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]


def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: tuple[int, int],
    k_size: tuple[int, int],
) -> torch.Tensor:
    """
    Add decomposed Relative Positional Embeddings to the attention map.

    This function calculates and applies decomposed Relative Positional Embeddings as described in the MViTv2
    paper. It enhances the attention mechanism by incorporating spatial relationships between query and key
    positions.

    Args:
        attn (torch.Tensor): Attention map with shape (B, q_h * q_w, k_h * k_w).
        q (torch.Tensor): Query tensor in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (torch.Tensor): Relative position embeddings for height axis with shape (Lh, C).
        rel_pos_w (torch.Tensor): Relative position embeddings for width axis with shape (Lw, C).
        q_size (tuple[int, int]): Spatial sequence size of query q as (q_h, q_w).
        k_size (tuple[int, int]): Spatial sequence size of key k as (k_h, k_w).

    Returns:
        (torch.Tensor): Updated attention map with added relative positional embeddings, shape
            (B, q_h * q_w, k_h * k_w).

    Examples:
        >>> B, C, q_h, q_w, k_h, k_w = 1, 64, 8, 8, 8, 8
        >>> attn = torch.rand(B, q_h * q_w, k_h * k_w)
        >>> q = torch.rand(B, q_h * q_w, C)
        >>> rel_pos_h = torch.rand(2 * max(q_h, k_h) - 1, C)
        >>> rel_pos_w = torch.rand(2 * max(q_w, k_w) - 1, C)
        >>> q_size, k_size = (q_h, q_w), (k_h, k_w)
        >>> updated_attn = add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size)
        >>> print(updated_attn.shape)
        torch.Size([1, 64, 64])

    References:
        https://github.com/facebookresearch/mvit/blob/main/mvit/models/attention.py
    """
    q_h, q_w = q_size
    k_h, k_w = k_size
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)

    attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view(
        B, q_h * q_w, k_h * k_w
    )

    return attn
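
# Usage sketch (illustrative only, not part of the module): how this bias can be folded into an
# attention forward pass before the softmax. The tensor names and sizes below are assumptions made
# for this example.
# >>> B, C, H, W = 1, 32, 8, 8
# >>> q = torch.rand(B, H * W, C)
# >>> k = torch.rand(B, H * W, C)
# >>> attn = (q * (C**-0.5)) @ k.transpose(-2, -1)  # raw attention logits, shape (B, H*W, H*W)
# >>> rel_pos_h = torch.rand(2 * H - 1, C)
# >>> rel_pos_w = torch.rand(2 * W - 1, C)
# >>> attn = add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, (H, W), (H, W))
# >>> attn = attn.softmax(dim=-1)  # then attn @ v as usual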
2042
ultralytics/models/sam/predict.py
Normal file
File diff suppressed because it is too large