# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
# Global configuration YAML with settings and hyperparameters for YOLO training, validation, prediction and export
# For documentation see https://docs.ultralytics.com/usage/cfg/
task: detect # (str) YOLO task, i.e. detect, segment, classify, pose, obb
mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
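# Usage sketch (illustrative, not part of the config): in the Python API, task is inferred from the loaded
# model and mode is set by the method you call, so these two keys rarely need manual edits:
#   from ultralytics import YOLO
#   model = YOLO("yolov8n.pt")                  # detect task inferred from the weights
#   model.train(data="coco8.yaml", epochs=100)  # runs with mode=train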
# Train settings -------------------------------------------------------------------------------------------------------
model: # (str, optional) path to model file, i.e. yolov8n.pt or yolov8n.yaml
data: # (str, optional) path to data file, i.e. coco8.yaml
epochs: 100 # (int) number of epochs to train for
time: # (float, optional) max hours to train; overrides epochs if set
patience: 100 # (int) early stop after N epochs without val improvement
batch: 16 # (int) batch size; use -1 for AutoBatch
imgsz: 640 # (int | list) train/val use int (square); predict/export may use [h,w]
save: True # (bool) save train checkpoints and predict results
save_period: -1 # (int) save checkpoint every N epochs; disabled if < 1
cache: False # (bool | str) cache images in RAM (True/'ram') or on 'disk' to speed dataloading; False disables
device: # (int | str | list) device: 0 or [0,1,2,3] for CUDA, 'cpu'/'mps', or -1/[-1,-1] to auto-select idle GPUs
workers: 8 # (int) dataloader workers (per RANK if DDP)
project: # (str, optional) project name for results root
name: # (str, optional) experiment name; results in 'project/name'
exist_ok: False # (bool) overwrite existing 'project/name' if True
pretrained: True # (bool | str) use pretrained weights (bool) or load weights from path (str)
optimizer: auto # (str) optimizer: SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, or auto
verbose: True # (bool) print verbose logs during training/val
seed: 0 # (int) random seed for reproducibility
deterministic: True # (bool) enable deterministic ops; reproducible but may be slower
single_cls: False # (bool) treat all classes as a single class
rect: False # (bool) rectangular batching during training, or during validation when mode='val'
cos_lr: False # (bool) cosine learning rate scheduler
close_mosaic: 10 # (int) disable mosaic augmentation for final N epochs (0 to keep enabled)
resume: False # (bool) resume training from last checkpoint in the run dir
amp: True # (bool) Automatic Mixed Precision (AMP) training; True runs AMP capability check
fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
multi_scale: False # (bool) multiscale training by varying image size
compile: False # (bool | str) run torch.compile() with backend='inductor'; True="default", False disables, or pass "default", "reduce-overhead" or "max-autotune-no-cudagraphs"
# Segmentation
overlap_mask: True # (bool) merge instance masks into one mask during training (segment only)
mask_ratio: 4 # (int) mask downsample ratio (segment only)
# Classification
dropout: 0.0 # (float) dropout for classification head (classify only)
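# Training sketch (illustrative; values are examples, not recommendations) - any key above can be passed
# as a keyword override to the Python API or as key=value on the CLI:
#   from ultralytics import YOLO
#   model = YOLO("yolov8n.pt")
#   model.train(data="coco8.yaml", epochs=100, imgsz=640, batch=16, device=0, optimizer="auto")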
# Val/Test settings ----------------------------------------------------------------------------------------------------
val: True # (bool) run validation/testing during training
split: val # (str) dataset split to evaluate: 'val', 'test' or 'train'
save_json: False # (bool) save results to COCO JSON for external evaluation
conf: # (float, optional) confidence threshold; defaults: predict=0.25, val=0.001
iou: 0.7 # (float) IoU threshold used for NMS
max_det: 300 # (int) maximum number of detections per image
half: False # (bool) use half precision (FP16) if supported
dnn: False # (bool) use OpenCV DNN for ONNX inference
plots: True # (bool) save plots and images during train/val
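# Validation sketch (illustrative; the thresholds shown simply repeat the defaults listed above):
#   metrics = model.val(data="coco8.yaml", split="val", conf=0.001, iou=0.7)
#   print(metrics.box.map)  # mAP50-95, assuming a detect model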
# Predict settings -----------------------------------------------------------------------------------------------------
source: # (str, optional) path/dir/URL/stream for images or videos; e.g. 'ultralytics/assets' or '0' for webcam
vid_stride: 1 # (int) read every Nth frame for video sources
stream_buffer: False # (bool) True buffers all frames; False keeps the most recent frame for low-latency streams
visualize: False # (bool) visualize model features (predict) or TP/FP/FN confusion (val)
augment: False # (bool) apply test-time augmentation during prediction
agnostic_nms: False # (bool) class-agnostic NMS
classes: # (int | list[int], optional) filter by class id(s), e.g. 0 or [0,2,3]
retina_masks: False # (bool) use high-resolution segmentation masks (segment)
embed: # (list[int], optional) return feature embeddings from given layer indices
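# Prediction sketch (illustrative; the source URL and class filter are examples):
#   results = model.predict(source="https://ultralytics.com/images/bus.jpg", conf=0.25, classes=[0])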
# Visualize settings ---------------------------------------------------------------------------------------------------
show: False # (bool) show images/videos in a window if supported
save_frames: False # (bool) save individual frames from video predictions
save_txt: False # (bool) save results as .txt files (xywh format)
save_conf: False # (bool) save confidence scores with results
save_crop: False # (bool) save cropped prediction regions to files
show_labels: True # (bool) draw class labels on images, e.g. 'person'
show_conf: True # (bool) draw confidence values on images, e.g. '0.99'
show_boxes: True # (bool) draw bounding boxes on images
line_width: # (int, optional) line width of boxes; auto-scales with image size if not set
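# Display/save sketch (illustrative; combine these keys with any predict call):
#   model.predict(source=0, show=True, save=True, save_txt=True, line_width=2)  # source=0 uses the webcam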
# Export settings ------------------------------------------------------------------------------------------------------
format: torchscript # (str) target format, e.g. torchscript|onnx|openvino|engine|coreml|saved_model|pb|tflite|edgetpu|tfjs|paddle|mnn|ncnn|imx|rknn
keras: False # (bool) TF SavedModel only (format=saved_model); enable Keras layers during export
optimize: False # (bool) TorchScript only; apply mobile optimizations to the scripted model
int8: False # (bool) INT8/PTQ where supported (openvino, tflite, tfjs, engine, imx); needs calibration data/fraction
dynamic: False # (bool) dynamic shapes for torchscript, onnx, openvino, engine; enable variable image sizes
simplify: True # (bool) ONNX/engine only; run graph simplifier for cleaner ONNX before runtime conversion
opset: # (int, optional) ONNX/engine only; opset version for export; leave unset to use a tested default
workspace: # (float, optional) engine (TensorRT) only; workspace size in GiB, e.g. 4
nms: False # (bool) fuse NMS into the exported model where the backend supports it; if True, the conf/iou thresholds apply, and agnostic_nms applies to all formats except coreml
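# Export sketch (illustrative; the format and flags are examples):
#   model.export(format="onnx", dynamic=True, simplify=True)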
# Hyperparameters ------------------------------------------------------------------------------------------------------
lr0: 0.01 # (float) initial learning rate (SGD=1e-2, Adam/AdamW=1e-3)
lrf: 0.01 # (float) final LR fraction; final LR = lr0 * lrf
momentum: 0.937 # (float) SGD momentum or Adam beta1
weight_decay: 0.0005 # (float) weight decay (L2 regularization)
warmup_epochs: 3.0 # (float) warmup epochs (fractions allowed)
warmup_momentum: 0.8 # (float) initial momentum during warmup
warmup_bias_lr: 0.1 # (float) bias learning rate during warmup
box: 7.5 # (float) box loss gain
cls: 0.5 # (float) classification loss gain
dfl: 1.5 # (float) distribution focal loss gain
pose: 12.0 # (float) pose loss gain (pose tasks)
kobj: 1.0 # (float) keypoint objectness loss gain (pose tasks)
nbs: 64 # (int) nominal batch size used for loss normalization
hsv_h: 0.015 # (float) HSV hue augmentation fraction
hsv_s: 0.7 # (float) HSV saturation augmentation fraction
hsv_v: 0.4 # (float) HSV value (brightness) augmentation fraction
degrees: 0.0 # (float) rotation degrees (+/-)
translate: 0.1 # (float) translation fraction (+/-)
scale: 0.5 # (float) scale gain (+/-)
shear: 0.0 # (float) shear degrees (+/-)
perspective: 0.0 # (float) perspective fraction (+/-), range 0-0.001
flipud: 0.0 # (float) vertical flip probability
fliplr: 0.5 # (float) horizontal flip probability
bgr: 0.0 # (float) RGB↔BGR channel swap probability
mosaic: 1.0 # (float) mosaic augmentation probability
mixup: 0.0 # (float) MixUp augmentation probability
cutmix: 0.0 # (float) CutMix augmentation probability
copy_paste: 0.0 # (float) segmentation copy-paste probability
copy_paste_mode: flip # (str) copy-paste strategy for segmentation: flip or mixup
auto_augment: randaugment # (str) classification auto augmentation policy: randaugment, autoaugment, augmix
erasing: 0.4 # (float) random erasing probability for classification training, range 0-0.9, must be < 1.0
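# Hyperparameter override sketch (illustrative; values repeat the defaults above, not tuned settings):
#   model.train(data="coco8.yaml", epochs=100, lr0=0.01, momentum=0.937, mosaic=1.0, fliplr=0.5)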
# Custom config.yaml ---------------------------------------------------------------------------------------------------
cfg: # (str, optional) path to a config.yaml that overrides defaults
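# Custom-config sketch (illustrative; 'my_cfg.yaml' is a placeholder path, and this assumes cfg is also
# accepted as an override in the Python API, as it is on the CLI):
#   model.train(cfg="my_cfg.yaml", data="coco8.yaml")  # keys in my_cfg.yaml replace the defaults above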
# Tracker settings ------------------------------------------------------------------------------------------------------
tracker: botsort.yaml # (str) tracker config file: botsort.yaml or bytetrack.yaml
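# Tracking sketch (illustrative; the video path is a placeholder):
#   model.track(source="path/to/video.mp4", tracker="bytetrack.yaml", conf=0.3, iou=0.5)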