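"""Train a spike-driven transformer (timm-style training loop) on image or DVS event datasets.

Illustrative invocation (a sketch; the config path below is this script's default, the extra flags are optional):

    python train.py -c conf/thu_chl/2_256_200E_t10.yml --batch-size 16 --epochs 200
"""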
import argparse
import time
import yaml
import os
import logging
import numpy as np
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
from spikingjelly.clock_driven import functional
from spikingjelly.datasets.cifar10_dvs import CIFAR10DVS
from spikingjelly.datasets.dvs128_gesture import DVS128Gesture
import torch
import torch.nn as nn
import torchvision.utils
import torchvision.transforms as transforms
from torch.nn.parallel import DistributedDataParallel as NativeDDP
import torchinfo
from timm.data import (
create_dataset,
create_loader,
resolve_data_config,
Mixup,
FastCollateMixup,
AugMixDataset,
)
from timm.models import (
create_model,
safe_model_name,
resume_checkpoint,
load_checkpoint,
convert_splitbn_model,
model_parameters,
)
from timm.models.helpers import clean_state_dict
from timm.utils import *
from timm.loss import (
LabelSmoothingCrossEntropy,
SoftTargetCrossEntropy,
JsdCrossEntropy,
BinaryCrossEntropy,
)
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
import model, dvs_utils, criterion
##
from datasets.action.dataloaders import create_datasets as create_action_datasets
from datasets.thu_chl.dataloaders import defualt_create_datasets as create_thu_chl_datasets
from datasets.DVS_Lip.dataloaders import defualt_create_datasets as create_dvs_lip_datasets
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, "autocast") is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
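# NOTE: the resume_checkpoint defined below shadows the timm import above; unlike timm's version
# it loads model weights with strict=False, so checkpoints with extra or missing keys still resume.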
def resume_checkpoint(
model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True
):
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location="cpu")
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
if log_info:
_logger.info("Restoring model state from checkpoint...")
state_dict = clean_state_dict(checkpoint["state_dict"])
model.load_state_dict(state_dict, strict=False)
if optimizer is not None and "optimizer" in checkpoint:
if log_info:
_logger.info("Restoring optimizer state from checkpoint...")
optimizer.load_state_dict(checkpoint["optimizer"])
if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
if log_info:
_logger.info("Restoring AMP loss scaler state from checkpoint...")
loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
if "epoch" in checkpoint:
resume_epoch = checkpoint["epoch"]
if "version" in checkpoint and checkpoint["version"] > 1:
resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
if log_info:
_logger.info(
"Loaded checkpoint '{}' (epoch {})".format(
checkpoint_path, checkpoint["epoch"]
)
)
else:
model.load_state_dict(checkpoint)
if log_info:
_logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
return resume_epoch
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
torch.backends.cudnn.benchmark = True
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False
)
parser.add_argument(
"-c",
"--config",
default="conf/thu_chl/2_256_200E_t10.yml",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments",
)
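# Illustrative YAML config (keys must match the argparse dest names, i.e. dashes become underscores;
# the values below are assumptions for illustration, not the contents of the default config):
#   model: sdt
#   dataset: thu_chl
#   time_steps: 10
#   dim: 256          # consumed as embed_dims at model creation; there is no --dim CLI flag
#   batch_size: 16
#   epochs: 200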
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# Dataset / Model parameters
parser.add_argument(
"-data-dir",
metavar="DIR",
default="",
help="path to dataset",
)
parser.add_argument(
"--dataset",
"-d",
metavar="NAME",
default="torch/cifar10",
help="dataset type (default: ImageFolder/ImageTar if empty)",
)
parser.add_argument(
"--train-split",
metavar="NAME",
default="train",
help="dataset train split (default: train)",
)
parser.add_argument(
"--val-split",
metavar="NAME",
default="validation",
help="dataset validation split (default: validation)",
)
parser.add_argument(
"--train-split-path",
type=str,
default=None,
metavar="N",
help="",
)
parser.add_argument(
"--model",
default="sdt",
type=str,
metavar="MODEL",
help='Name of model to train (default: "sdt")',
)
parser.add_argument(
"--pooling-stat",
default="1111",
type=str,
help="pooling layers in SPS moduls",
)
parser.add_argument(
"--TET",
default=False,
type=bool,
help="",
)
parser.add_argument(
"--TET-means",
default=1.0,
type=float,
help="",
)
parser.add_argument(
"--TET-lamb",
default=0.0,
type=float,
help="",
)
parser.add_argument(
"--spike-mode",
default="lif",
type=str,
help="",
)
parser.add_argument(
"--layer",
default=4,
type=int,
help="",
)
parser.add_argument(
"--in-channels",
default=3,
type=int,
help="",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="Start with pretrained version of specified network (if avail)",
)
parser.add_argument(
"--initial-checkpoint",
default="",
type=str,
metavar="PATH",
help="Initialize model from this checkpoint (default: none)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="Resume full model and optimizer state from checkpoint (default: none)",
)
parser.add_argument(
"--no-resume-opt",
action="store_true",
default=False,
help="prevent resume of optimizer state when resuming model",
)
parser.add_argument(
"--num-classes",
type=int,
default=1000,
metavar="N",
help="number of label classes (Model default if None)",
)
parser.add_argument(
"--time-steps",
type=int,
default=4,
metavar="N",
help="",
)
parser.add_argument(
"--num-heads",
type=int,
default=8,
metavar="N",
help="",
)
parser.add_argument(
"--patch-size", type=int, default=None, metavar="N", help="Image patch size"
)
parser.add_argument(
"--mlp-ratio",
type=int,
default=4,
metavar="N",
help="expand ration of embedding dimension in MLP block",
)
parser.add_argument(
"--gp",
default=None,
type=str,
metavar="POOL",
help="Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
metavar="N",
help="Image patch size (default: None => model default)",
)
parser.add_argument(
"--input-size",
default=None,
nargs=3,
type=int,
metavar="N N N",
help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty",
)
parser.add_argument(
"--crop-pct",
default=None,
type=float,
metavar="N",
help="Input image center crop percent (for validation only)",
)
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=None,
metavar="MEAN",
help="Override mean pixel value of dataset",
)
parser.add_argument(
"--std",
type=float,
nargs="+",
default=None,
metavar="STD",
help="Override std deviation of of dataset",
)
parser.add_argument(
"--interpolation",
default="",
type=str,
metavar="NAME",
help="Image resize interpolation type (overrides model)",
)
parser.add_argument(
"-b",
"--batch-size",
type=int,
default=32,
metavar="N",
help="input batch size for training (default: 32)",
)
parser.add_argument(
"-vb",
"--val-batch-size",
type=int,
default=16,
metavar="N",
help="input val batch size for training (default: 32)",
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="sgd",
type=str,
metavar="OPTIMIZER",
help='Optimizer (default: "sgd")',
)
parser.add_argument(
"--opt-eps",
default=None,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: None, use opt default)",
)
parser.add_argument(
"--opt-betas",
default=None,
type=float,
nargs="+",
metavar="BETA",
help="Optimizer Betas (default: None, use opt default)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="Optimizer momentum (default: 0.9)",
)
parser.add_argument(
"--weight-decay", type=float, default=0.0001, help="weight decay (default: 0.0001)"
)
parser.add_argument(
"--clip-grad",
type=float,
default=None,
metavar="NORM",
help="Clip gradient norm (default: None, no clipping)",
)
parser.add_argument(
"--clip-mode",
type=str,
default="norm",
help='Gradient clipping mode. One of ("norm", "value", "agc")',
)
# Learning rate schedule parameters
parser.add_argument(
"--sched",
default="step",
type=str,
metavar="SCHEDULER",
help='LR scheduler (default: "step")',
)
parser.add_argument(
"--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)"
)
parser.add_argument(
"--lr-noise",
type=float,
nargs="+",
default=None,
metavar="pct, pct",
help="learning rate noise on/off epoch percentages",
)
parser.add_argument(
"--lr-noise-pct",
type=float,
default=0.67,
metavar="PERCENT",
help="learning rate noise limit percent (default: 0.67)",
)
parser.add_argument(
"--lr-noise-std",
type=float,
default=1.0,
metavar="STDDEV",
help="learning rate noise std-dev (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-mul",
type=float,
default=1.0,
metavar="MULT",
help="learning rate cycle len multiplier (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-limit",
type=int,
default=1,
metavar="N",
help="learning rate cycle limit",
)
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0001,
metavar="LR",
help="warmup learning rate (default: 0.0001)",
)
parser.add_argument(
"--min-lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
help="number of epochs to train (default: 2)",
)
parser.add_argument(
"--epoch-repeats",
type=float,
default=0.0,
metavar="N",
help="epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).",
)
parser.add_argument(
"--start-epoch",
default=None,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"--decay-epochs",
type=float,
default=30,
metavar="N",
help="epoch interval to decay LR",
)
parser.add_argument(
"--warmup-epochs",
type=int,
default=3,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
parser.add_argument(
"--cooldown-epochs",
type=int,
default=10,
metavar="N",
help="epochs to cooldown LR at min_lr, after cyclic schedule ends",
)
parser.add_argument(
"--patience-epochs",
type=int,
default=10,
metavar="N",
help="patience epochs for Plateau LR scheduler (default: 10",
)
parser.add_argument(
"--decay-rate",
"--dr",
type=float,
default=0.1,
metavar="RATE",
help="LR decay rate (default: 0.1)",
)
# Augmentation & regularization parameters
parser.add_argument(
"--no-aug",
action="store_true",
default=False,
help="Disable all training augmentation, override other train aug args",
)
parser.add_argument(
"--scale",
type=float,
nargs="+",
default=[0.08, 1.0],
metavar="PCT",
help="Random resize scale (default: 0.08 1.0)",
)
parser.add_argument(
"--ratio",
type=float,
nargs="+",
default=[3.0 / 4.0, 4.0 / 3.0],
metavar="RATIO",
help="Random resize aspect ratio (default: 0.75 1.33)",
)
parser.add_argument(
"--hflip", type=float, default=0.5, help="Horizontal flip training aug probability"
)
parser.add_argument(
"--vflip", type=float, default=0.0, help="Vertical flip training aug probability"
)
parser.add_argument(
"--color-jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)",
)
parser.add_argument(
"--aa",
type=str,
default=None,
metavar="NAME",
help='Use AutoAugment policy. "v0" or "original". (default: None)',
)
parser.add_argument(
"--aug-splits",
type=int,
default=0,
help="Number of augmentation splits (default: 0, valid: 0 or >=2)",
)
parser.add_argument(
"--jsd",
action="store_true",
default=False,
help="Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.",
)
parser.add_argument(
"--bce-loss",
action="store_true",
default=False,
help="Enable BCE loss w/ Mixup/CutMix use.",
)
parser.add_argument(
"--bce-target-thresh",
type=float,
default=None,
help="Threshold for binarizing softened BCE targets (default: None, disabled)",
)
parser.add_argument(
"--reprob",
type=float,
default=0.0,
metavar="PCT",
help="Random erase prob (default: 0.)",
)
parser.add_argument(
"--remode", type=str, default="const", help='Random erase mode (default: "const")'
)
parser.add_argument(
"--recount", type=int, default=1, help="Random erase count (default: 1)"
)
parser.add_argument(
"--resplit",
action="store_true",
default=False,
help="Do not random erase first (clean) augmentation split",
)
parser.add_argument(
"--mixup",
type=float,
default=0.0,
help="mixup alpha, mixup enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix",
type=float,
default=0.0,
help="cutmix alpha, cutmix enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix-minmax",
type=float,
nargs="+",
default=None,
help="cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)",
)
parser.add_argument(
"--mixup-prob",
type=float,
default=1.0,
help="Probability of performing mixup or cutmix when either/both is enabled",
)
parser.add_argument(
"--mixup-switch-prob",
type=float,
default=0.5,
help="Probability of switching to cutmix when both mixup and cutmix enabled",
)
parser.add_argument(
"--mixup-mode",
type=str,
default="batch",
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"',
)
parser.add_argument(
"--mixup-off-epoch",
default=0,
type=int,
metavar="N",
help="Turn off mixup after this epoch, disabled if 0 (default: 0)",
)
parser.add_argument(
"--smoothing", type=float, default=0.1, help="Label smoothing (default: 0.1)"
)
parser.add_argument(
"--train-interpolation",
type=str,
default="random",
help='Training interpolation (random, bilinear, bicubic default: "random")',
)
parser.add_argument(
"--drop", type=float, default=0.0, metavar="PCT", help="Dropout rate (default: 0.)"
)
parser.add_argument(
"--drop-connect",
type=float,
default=None,
metavar="PCT",
help="Drop connect rate, DEPRECATED, use drop-path (default: None)",
)
parser.add_argument(
"--drop-path",
type=float,
default=0.2,
metavar="PCT",
help="Drop path rate (default: None)",
)
parser.add_argument(
"--drop-block",
type=float,
default=None,
metavar="PCT",
help="Drop block rate (default: None)",
)
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument(
"--bn-tf",
action="store_true",
default=False,
help="Use Tensorflow BatchNorm defaults for models that support it (default: False)",
)
parser.add_argument(
"--bn-momentum",
type=float,
default=None,
help="BatchNorm momentum override (if not None)",
)
parser.add_argument(
"--bn-eps",
type=float,
default=None,
help="BatchNorm epsilon override (if not None)",
)
parser.add_argument(
"--sync-bn",
action="store_true",
help="Enable NVIDIA Apex or Torch synchronized BatchNorm.",
)
parser.add_argument(
"--dist-bn",
type=str,
default="",
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")',
)
parser.add_argument(
"--split-bn",
action="store_true",
help="Enable separate BN layers per augmentation split.",
)
parser.add_argument(
"--linear-prob",
action="store_true",
help="",
)
# Model Exponential Moving Average
parser.add_argument(
"--model-ema",
action="store_true",
default=False,
help="Enable tracking moving average of model weights",
)
parser.add_argument(
"--model-ema-force-cpu",
action="store_true",
default=False,
help="Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.",
)
parser.add_argument(
"--model-ema-decay",
type=float,
default=0.9998,
help="decay factor for model weights moving average (default: 0.9998)",
)
# Misc
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=100,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--recovery-interval",
type=int,
default=0,
metavar="N",
help="how many batches to wait before writing recovery checkpoint",
)
parser.add_argument(
"--checkpoint-hist",
type=int,
default=10,
metavar="N",
help="number of checkpoints to keep (default: 10)",
)
parser.add_argument(
"-j",
"--workers",
type=int,
default=4,
metavar="N",
help="how many training processes to use (default: 1)",
)
parser.add_argument(
"--save-images",
action="store_true",
default=False,
help="save images of input bathes every log interval for debugging",
)
parser.add_argument(
"--amp",
action="store_true",
default=False,
help="use NVIDIA Apex AMP or Native AMP for mixed precision training",
)
parser.add_argument(
"--apex-amp",
action="store_true",
default=False,
help="Use NVIDIA Apex AMP mixed precision",
)
parser.add_argument(
"--native-amp",
action="store_true",
default=False,
help="Use Native Torch AMP mixed precision",
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="Use channels_last memory layout",
)
parser.add_argument(
"--pin-mem",
action="store_true",
default=False,
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument(
"--no-prefetcher",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--dvs-aug",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--dvs-trival-aug",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--output",
default="",
type=str,
metavar="PATH",
help="path to output folder (default: none, current dir)",
)
parser.add_argument(
"--experiment",
default="",
type=str,
metavar="NAME",
help="name of train experiment, name of sub-folder for output",
)
parser.add_argument(
"--eval-metric",
default="top1",
type=str,
metavar="EVAL_METRIC",
help='Best metric (default: "top1")',
)
parser.add_argument(
"--tta",
type=int,
default=0,
metavar="N",
help="Test/inference time augmentation (oversampling) factor. 0=None (default: 0)",
)
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument(
"--use-multi-epochs-loader",
action="store_true",
default=False,
help="use the multi-epochs-loader to save time at the beginning of every epoch",
)
parser.add_argument(
"--torchscript",
dest="torchscript",
action="store_true",
help="convert model torchscript for inference",
)
parser.add_argument(
"--log-wandb",
action="store_true",
default=False,
help="log training and validation metrics to wandb",
)
_logger = logging.getLogger("train")
stream_handler = logging.StreamHandler()
format_str = "%(asctime)s %(levelname)s: %(message)s"
stream_handler.setFormatter(logging.Formatter(format_str))
_logger.addHandler(stream_handler)
_logger.propagate = False
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, "r") as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def main():
setup_default_logging()
args, args_text = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning(
"You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`"
)
args.prefetcher = not args.no_prefetcher
args.distributed = False
if "WORLD_SIZE" in os.environ:
args.distributed = int(os.environ["WORLD_SIZE"]) > 1
args.device = "cuda:1"
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = "cuda:%d" % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info(
"Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d."
% (args.rank, args.world_size)
)
else:
_logger.info("Training with a single process on 1 GPUs.")
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = "apex"
elif args.native_amp and has_native_amp:
use_amp = "native"
elif args.apex_amp or args.native_amp:
_logger.warning(
"Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6"
)
torch.backends.cudnn.benchmark = True
os.environ["PYTHONHASHSEED"] = str(args.seed)
np.random.seed(args.seed)
torch.initial_seed()  # NOTE: only reads the current default-generator seed; dataloader workers are not seeded here
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
random_seed(args.seed, args.rank)
args.dvs_mode = False
if args.dataset in ["cifar10-dvs-tet", "cifar10-dvs"]:
args.dvs_mode = True
model = create_model(
args.model,
T=args.time_steps,
pretrained=args.pretrained,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
num_heads=args.num_heads,
num_classes=args.num_classes,
pooling_stat=args.pooling_stat,
img_size_h=args.img_size,
img_size_w=args.img_size,
patch_size=args.patch_size,
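# NOTE: `dim` has no CLI flag; it is expected to be injected from the YAML config via parser.set_defaults(**cfg)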
embed_dims=args.dim,
mlp_ratios=args.mlp_ratio,
in_channels=args.in_channels,
qkv_bias=False,
depths=args.layer,
sr_ratios=1,
spike_mode=args.spike_mode,
dvs_mode=args.dvs_mode,
TET=args.TET,
)
if args.local_rank == 0:
_logger.info(f"Creating model {args.model}")
_logger.info(
str(
torchinfo.summary(
model, (2, args.in_channels, args.img_size, args.img_size)
)
)
)
if args.num_classes is None:
assert hasattr(
model, "num_classes"
), "Model must have `num_classes` attr if not set on cmd line/config."
args.num_classes = (
model.num_classes
) # FIXME handle model default vs config num_classes more elegantly
data_config = resolve_data_config(
vars(args), model=model, verbose=args.local_rank == 0
)
output_dir = None
if args.rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = "-".join(
[
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
"data-" + args.dataset.split("/")[-1],
f"t-{args.time_steps}",
f"spike-{args.spike_mode}",
]
)
output_dir = get_outdir(
args.output if args.output else "./output/train", exp_name
)
file_handler = logging.FileHandler(
os.path.join(output_dir, f"{args.model}.log"), "w"
)
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(logging.INFO)
_logger.addHandler(file_handler)
if args.local_rank == 0:
_logger.info(
f"Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}"
)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, "A split of 1 makes no sense"
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp != "native":
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
"Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using "
"zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled."
)
if args.torchscript:
assert not use_amp == "apex", "Cannot use APEX AMP with torchscripted model"
assert not args.sync_bn, "Cannot use SyncBatchNorm with torchscripted model"
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == "apex":
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX AMP. Training in mixed precision.")
elif use_amp == "native":
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info("Using native Torch AMP. Training in mixed precision.")
else:
if args.local_rank == 0:
_logger.info("AMP not enabled. Training in float32.")
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model,
args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0,
)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model,
decay=args.model_ema_decay,
device="cpu" if args.model_ema_force_cpu else None,
)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp != "native":
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True, find_unused_parameters=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(
model, device_ids=[args.local_rank], find_unused_parameters=True
) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
# for linear prob
if args.linear_prob:
for n, p in model.module.named_parameters():
if "patch_embed" in n:
p.requires_grad = False
# if "block" in n:
# p.requires_grad = False
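# only the patch-embedding parameters are frozen here; the encoder blocks and classifier head remain trainable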
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None and (not args.linear_prob):
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info("Scheduled epochs: {}".format(num_epochs))
transforms_train, transforms_eval = None, None
# create the train and eval datasets
dataset_train, dataset_eval = None, None
if args.dataset == "cifar10-dvs-tet":
dataset_train = dvs_utils.DVSCifar10(
root=os.path.join(args.data_dir, "train"),
train=True,
)
dataset_eval = dvs_utils.DVSCifar10(
root=os.path.join(args.data_dir, "test"),
train=False,
)
elif args.dataset == "cifar10-dvs":
dataset = CIFAR10DVS(
args.data_dir,
data_type="frame",
frames_number=args.time_steps,
split_by="number",
transform=dvs_utils.Resize(64),
)
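# split the frames 90% / 10% into train / test across the 10 CIFAR10-DVS classes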
dataset_train, dataset_eval = dvs_utils.split_to_train_test_set(
0.9, dataset, 10
)
elif args.dataset == "gesture":
dataset_train = DVS128Gesture(
args.data_dir,
train=True,
data_type="frame",
frames_number=args.time_steps,
split_by="number",
)
dataset_eval = DVS128Gesture(
args.data_dir,
train=False,
data_type="frame",
frames_number=args.time_steps,
split_by="number",
)
elif args.dataset == "dailyaction":
dataset_train = create_action_datasets(
args.data_dir,
train=True,
ds=2,
dt=100 * 1000,
T=args.time_steps,
)
dataset_eval = create_action_datasets(
args.data_dir,
train=False,
ds=2,
dt=1 * 1000,
T=args.time_steps,
)
elif args.dataset == "thu_chl":
dataset_train = create_thu_chl_datasets(
args.data_dir,
train=True,
event_augmentation=False,
repr=["eventAccuFrame", "timeSurface"],
time_num=args.time_steps,
voxel=True,
)
dataset_eval = create_thu_chl_datasets(
args.data_dir,
train=False,
event_augmentation=False,
repr=["eventAccuFrame", "timeSurface"],
time_num=args.time_steps,
voxel=True,
)
elif args.dataset == "dvs_lip":
dataset_train = create_dvs_lip_datasets(
args.data_dir,
phase='train',
event_augmentation=False,
repr=["eventAccuFrame", "timeSurface"],
time_num=args.time_steps,
native=True
)
dataset_eval = create_dvs_lip_datasets(
args.data_dir,
phase='test',
event_augmentation=False,
repr=["eventAccuFrame", "timeSurface"],
time_num=args.time_steps,
native=True
)
else:
dataset_train = create_dataset(
args.dataset,
root=args.data_dir,
split=args.train_split,
is_training=True,
batch_size=args.batch_size,
repeats=args.epoch_repeats,
transform=transforms_train,
# download=True,
)
dataset_eval = create_dataset(
args.dataset,
root=args.data_dir,
split=args.val_split,
is_training=False,
batch_size=args.batch_size,
transform=transforms_eval,
# download=True,
)
# setup mixup / cutmix
collate_fn = None
train_dvs_aug, train_dvs_trival_aug = None, None
if args.dvs_aug:
train_dvs_aug = dvs_utils.Cutout(n_holes=1, length=16)
if args.dvs_trival_aug:
train_dvs_trival_aug = dvs_utils.SNNAugmentWide()
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0.0 or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.num_classes,
)
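# with the prefetcher, mixup/cutmix is folded into the collate fn and applied to the CPU batch;
# otherwise a Mixup instance is applied to each batch of device tensors inside the training loop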
if args.prefetcher and args.dataset not in dvs_utils.DVS_DATASET:
assert (
not num_aug_splits
) # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1 and args.dataset not in dvs_utils.DVS_DATASET:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config["interpolation"]
loader_train, loader_eval, train_idx = None, None, None
# NOTE(hujiakui): only for ImageNet
if args.train_split_path is not None:
train_idx = np.load(args.train_split_path).tolist()
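# DVS datasets already yield dense frame tensors with an explicit time dimension, so plain
# DataLoaders are used instead of timm's create_loader / prefetcher pipeline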
if args.dataset in dvs_utils.DVS_DATASET:
loader_train = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
loader_eval = torch.utils.data.DataLoader(
dataset_eval,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
else:
loader_train = create_loader(
dataset_train,
input_size=data_config["input_size"],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
# train_idx=train_idx,
)
# NOTE(hujiakui): passing train_idx would require modifying timm's create_loader
loader_eval = create_loader(
dataset_eval,
input_size=data_config["input_size"],
batch_size=args.val_batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config["crop_pct"],
pin_memory=args.pin_mem,
)
if args.local_rank == 0:
_logger.info("Create dataloader: {}".format(args.dataset))
# setup loss function
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(
num_splits=num_aug_splits, smoothing=args.smoothing
).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(
smoothing=args.smoothing, target_threshold=args.bce_target_thresh
)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
if args.rank == 0:
decreasing = False
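# the tracked eval metric (top1 by default) improves upward, hence decreasing=False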
saver = CheckpointSaver(
model=model,
optimizer=optimizer,
args=args,
model_ema=model_ema,
amp_scaler=loss_scaler,
checkpoint_dir=output_dir,
recovery_dir=output_dir,
decreasing=decreasing,
max_history=args.checkpoint_hist,
)
with open(os.path.join(output_dir, "args.yaml"), "w") as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, "set_epoch"):
loader_train.sampler.set_epoch(epoch)
# eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
train_metrics = train_one_epoch(
epoch,
model,
loader_train,
optimizer,
train_loss_fn,
args,
lr_scheduler=lr_scheduler,
saver=saver,
output_dir=output_dir,
amp_autocast=amp_autocast,
loss_scaler=loss_scaler,
model_ema=model_ema,
mixup_fn=mixup_fn,
dvs_aug=train_dvs_aug,
dvs_trival_aug=train_dvs_trival_aug,
)
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == "reduce")
eval_metrics = validate(
model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast
)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
distribute_bn(model_ema, args.world_size, args.dist_bn == "reduce")
ema_eval_metrics = validate(
model_ema.module,
loader_eval,
validate_loss_fn,
args,
amp_autocast=amp_autocast,
log_suffix=" (EMA)",
)
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch,
train_metrics,
eval_metrics,
os.path.join(output_dir, "summary.csv"),
write_header=best_metric is None,
log_wandb=args.log_wandb and has_wandb,
)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(
epoch, metric=save_metric
)
_logger.info(
"*** Best metric: {0} (epoch {1})".format(best_metric, best_epoch)
)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info("*** Best metric: {0} (epoch {1})".format(best_metric, best_epoch))
def train_one_epoch(
epoch,
model,
loader,
optimizer,
loss_fn,
args,
lr_scheduler=None,
saver=None,
output_dir=None,
amp_autocast=suppress,
loss_scaler=None,
model_ema=None,
mixup_fn=None,
dvs_aug=None,
dvs_trival_aug=None,
):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher:
if hasattr(loader, "mixup_enabled"):
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
sample_number = 0
start_time = time.time()
second_order = hasattr(optimizer, "is_second_order") and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
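# SpikingJelly neurons keep membrane state between forward passes; reset it before the epoch
# and again after every optimizer step (see below) so batches do not leak state into each other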
functional.reset_net(model)
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
# input: [B, T, C, H, W] event frames (e.g. torch.Size([4, 10, 2, 128, 128])); target: [B] class indices
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
input = input.float()
if not args.prefetcher or args.dataset in dvs_utils.DVS_DATASET:
if args.amp and not isinstance(input, torch.cuda.HalfTensor):
input = input.half()
input, target = input.cuda(), target.cuda()
if dvs_aug is not None:
input = dvs_aug(input)
if dvs_trival_aug is not None:
# apply the augmentation per sample, then restack into a batch
augmented = []
for i in range(input.shape[0]):
augmented.append(dvs_trival_aug(input[i]))
input = torch.stack(augmented)
del augmented
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
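# when TET is enabled, criterion.TET_loss is expected to average loss_fn over the time dimension of
# the logits and add an MSE regularizer toward `means`, weighted by `lamb` (see criterion.py)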
with amp_autocast():
output = model(input)  # the model returns a tuple; the first element holds the logits
output = output[0]
if args.TET:
loss = criterion.TET_loss(
output, target, loss_fn, means=args.TET_means, lamb=args.TET_lamb
)
else:
loss = loss_fn(output, target)  # logits: [B, num_classes], target: [B]
sample_number += input.shape[0]
if not args.distributed:
losses_m.update(loss.item(), input.size(0)) # input.size(0)=b
optimizer.zero_grad()
if loss_scaler is not None:  # AMP path: the scaler handles backward, gradient clipping and the step
loss_scaler(
loss,
optimizer,
clip_grad=args.clip_grad,
clip_mode=args.clip_mode,
parameters=model_parameters(
model, exclude_head="agc" in args.clip_mode
),
create_graph=second_order,
)
else:
# loss.backward()
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head="agc" in args.clip_mode),
value=args.clip_grad,
mode=args.clip_mode,
)
optimizer.step()
functional.reset_net(model)
if model_ema is not None:
model_ema.update(model)
functional.reset_net(model_ema)
torch.cuda.synchronize()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group["lr"] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
"Train: {} [{:>4d}/{} ({:>3.0f}%)] "
"Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) "
"Time: {batch_time.val:.3f}s, {rate:>7.2f}/s "
"({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) "
"LR: {lr:.3e} "
"Data: {data_time.val:.3f} ({data_time.avg:.3f})".format(
epoch,
batch_idx,
len(loader),
100.0 * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m,
)
)
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, "train-batch-%d.jpg" % batch_idx),
padding=0,
normalize=True,
)
if (
saver is not None
and args.recovery_interval
and (last_batch or (batch_idx + 1) % args.recovery_interval == 0)
):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, "sync_lookahead"):
optimizer.sync_lookahead()
if args.local_rank == 0:
_logger.info(f"samples / s = {sample_number / (time.time() - start_time): .3f}")
return OrderedDict([("loss", losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=""):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
# functional.reset_net(model)
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
input = input.float()
if (target >= 1000).sum() != 0 or (target < 0).sum() != 0:
print(target)
last_batch = batch_idx == last_idx
if not args.prefetcher or args.dataset in dvs_utils.DVS_DATASET:
if args.amp and not isinstance(input, torch.cuda.HalfTensor):
input = input.half()
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
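# with TET the logits keep a leading time dimension; average over it for evaluation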
if args.TET:
output = output.mean(0)
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0 : target.size(0) : reduce_factor]
if (target >= 1000).sum() != 0 or (target < 0).sum() != 0:
print(target)
loss = loss_fn(output, target)
functional.reset_net(model)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (
last_batch or batch_idx % args.log_interval == 0
):
log_name = "Test" + log_suffix
_logger.info(
"{0}: [{1:>4d}/{2}] "
"Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) "
"Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) "
"Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})".format(
log_name,
batch_idx,
last_idx,
batch_time=batch_time_m,
loss=losses_m,
top1=top1_m,
top5=top5_m,
)
)
metrics = OrderedDict(
[("loss", losses_m.avg), ("top1", top1_m.avg), ("top5", top5_m.avg)]
)
return metrics
if __name__ == "__main__":
main()