train_onepose_plus.py (forked from zju3dv/OnePose_Plus_Plus)
from pytorch_lightning import LightningModule, LightningDataModule, Callback, Trainer
from pytorch_lightning import seed_everything
from pytorch_lightning.loggers import LightningLoggerBase

import math
import torch
import hydra
from omegaconf import DictConfig
from typing import List

from src.utils import template_utils as utils

import warnings

warnings.filterwarnings("ignore")


def train(config: DictConfig):
    if config["print_config"]:
        utils.print_config(config)

    if "seed" in config:
        seed_everything(config["seed"])

    # scale lr and warmup-step automatically
    if not isinstance(config.trainer.gpus, (int, str)):
        # List type
        _n_gpus = len(config.trainer.gpus)
    else:
        _n_gpus = (
            int(config.trainer.gpus)
            if "," not in str(config.trainer.gpus)
            else len([num for num in config.trainer.gpus.split(",") if num != ""])
        )
    _n_gpus = _n_gpus if _n_gpus != -1 else torch.cuda.device_count()
    config.model.trainer.world_size = _n_gpus * config.trainer.num_nodes
    true_batch_size = config.model.trainer.world_size * config.datamodule.batch_size
    _scaling = true_batch_size / config.model.trainer.canonical_bs
    # config.model.trainer.scaling = _scaling
    config.model.trainer.true_lr = config.model.trainer.canonical_lr * _scaling
    config.model.loss.fine_weight = (
        0.25 * (config.model.OnePosePlus.loftr_fine.window_size / 5) ** 2
    )
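    # Illustrative numbers (not taken from any shipped config): with
    # trainer.num_nodes = 2, 4 GPUs per node and datamodule.batch_size = 2,
    # true_batch_size = 2 * 4 * 2 = 16; if canonical_bs = 64, the scaling
    # factor is 16 / 64 = 0.25 and true_lr = 0.25 * canonical_lr. With
    # loftr_fine.window_size = 5 the fine loss weight stays at 0.25.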
    # Init PyTorch Lightning model ⚡
    model: LightningModule = hydra.utils.instantiate(config["model"])

    # Init PyTorch Lightning datamodule ⚡
    datamodule: LightningDataModule = hydra.utils.instantiate(config["datamodule"])
    datamodule.setup()
    # Init PyTorch Lightning callbacks ⚡
    callbacks: List[Callback] = []
    if "callbacks" in config:
        for _, cb_conf in config["callbacks"].items():
            if "_target_" in cb_conf:
                callbacks.append(hydra.utils.instantiate(cb_conf))

    # Init PyTorch Lightning loggers ⚡
    logger: List[LightningLoggerBase] = []
    if "logger" in config:
        for _, lg_conf in config["logger"].items():
            if "_target_" in lg_conf:
                logger.append(hydra.utils.instantiate(lg_conf))

    # Init PyTorch Lightning trainer ⚡
    trainer: Trainer = hydra.utils.instantiate(
        # config["trainer"], callbacks=callbacks, logger=logger, plugins=DDPPlugin(find_unused_parameters=False)
        config["trainer"],
        callbacks=callbacks,
        logger=logger,
    )

    # Send some parameters from config to all lightning loggers
    utils.log_hparams_to_all_loggers(
        config=config,
        model=model,
        datamodule=datamodule,
        trainer=trainer,
        callbacks=callbacks,
        logger=logger,
    )

    # Train the model
    trainer.fit(model=model, datamodule=datamodule)

    # Make sure everything closed properly
    utils.finish(
        config=config,
        model=model,
        datamodule=datamodule,
        trainer=trainer,
        callbacks=callbacks,
        logger=logger,
    )

    # Return best achieved metric score for optuna
    optimized_metric = config.get("optimized_metric", None)
    if optimized_metric:
        return trainer.callback_metrics[optimized_metric]


@hydra.main(version_base=None, config_path="configs/", config_name="config.yaml")
def main(config: DictConfig):
    return train(config)


if __name__ == "__main__":
    main()
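# Example launches (illustrative; the actual keys and defaults are defined by
# the Hydra configs under configs/, starting from configs/config.yaml):
#   python train_onepose_plus.py trainer.gpus=4
#   python train_onepose_plus.py trainer.gpus=[0,1] trainer.num_nodes=1 datamodule.batch_size=2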