# Copyright 2022-present, Lorenzo Bonicelli, Pietro Buzzega, Matteo Boschini, Angelo Porrello, Simone Calderara.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
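
# Example invocation (illustrative only; available model/dataset names depend on this fork's registries):
#   python main.py --model derpp --dataset seq-cifar10 --buffer_size 500 --load_best_args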
import numpy # needed (don't change it)
import importlib
import os
import socket
import sys
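
# Make the repository root and its subpackages importable before the project imports below.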
mammoth_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(mammoth_path)
sys.path.append(mammoth_path + '/datasets')
sys.path.append(mammoth_path + '/backbone')
sys.path.append(mammoth_path + '/models')

import datetime
import uuid
from argparse import ArgumentParser

import setproctitle
import torch

from datasets import NAMES as DATASET_NAMES
from datasets import ContinualDataset, get_dataset
from models import get_all_models, get_model
from utils.args import add_management_args, add_np_args
from utils.best_args import best_args
from utils.conf import set_random_seed
from utils.continual_training import train as ctrain
from utils.distributed import make_dp
from utils.training import train


def lecun_fix():
    # Yann moved his website to CloudFlare. You need this now
    from six.moves import urllib  # pyright: ignore
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    urllib.request.install_opener(opener)


def parse_args():
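    """Parse command-line arguments; with --load_best_args, substitute stored best hyperparameters."""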
    parser = ArgumentParser(description='mammoth', allow_abbrev=False)
    parser.add_argument('--model', type=str, required=True,
                        help='Model name.', choices=get_all_models())
    parser.add_argument('--load_best_args', action='store_true',
                        help='Loads the best arguments for each method, '
                             'dataset and memory buffer.')
    torch.set_num_threads(4)
    add_management_args(parser)
    add_np_args(parser)
    args = parser.parse_known_args()[0]
    mod = importlib.import_module('models.' + args.model)
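
    # With --load_best_args, re-parse using the model-specific parser, appending the stored
    # best hyperparameters for this dataset/method (and buffer size, if the method has one).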
    if args.load_best_args:
        parser.add_argument('--dataset', type=str, required=True,
                            choices=DATASET_NAMES,
                            help='Which dataset to perform experiments on.')
        if hasattr(mod, 'Buffer'):
            parser.add_argument('--buffer_size', type=int, required=True,
                                help='The size of the memory buffer.')
        args = parser.parse_args()
        if args.model == 'joint':
            best = best_args[args.dataset]['sgd']
        else:
            best = best_args[args.dataset][args.model]
        if hasattr(mod, 'Buffer'):
            best = best[args.buffer_size]
        else:
            best = best[-1]
        get_parser = getattr(mod, 'get_parser')
        parser = get_parser()
        to_parse = sys.argv[1:] + ['--' + k + '=' + str(v) for k, v in best.items()]
        to_parse.remove('--load_best_args')
        args = parser.parse_args(to_parse)
        if args.model == 'joint' and args.dataset == 'mnist-360':
            args.model = 'joint_gcl'
    else:
        get_parser = getattr(mod, 'get_parser')
        parser = get_parser()
        args = parser.parse_args()

    if args.seed is not None:
        set_random_seed(args.seed)

    return args


def print_args_settings(args, dataset):
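    """Print a human-readable summary of the NP-related settings and the dataset layout."""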
    if args.use_context:
        print(f"Using context with:\n\t a. batch factor: {args.context_batch_factor}\n\t b. num labels: {args.num_labels}")
    else:
        print("Not using context")

    if 'np' in args.np_type:
        print("Distribution loss settings:")
        print(f"\tKL losses for posterior approximation: kl-t = {args.kl_t}, kl-g = {args.kl_g}")
        print(f"\tKD losses for countering forgetting: kd-gr = {args.kd_gr}, kd-tr = {args.kd_tr}")
        if args.kd_context:
            print("\tComputing KD on context too")
        print(f"\t{'Using KL warmup' if args.kl_warmup else 'No KL warmup'}")
        if args.np_type == 'clnp':
            print(f"Stochasticity type for CLNP: {args.clnp_stochasticity}")

    print(f"Portion of LR warmup steps: {args.warmup_portion}")
    print(f"Num of tasks: {dataset.N_TASKS}, Num of classes per task: {dataset.N_CLASSES_PER_TASK}")


def main(args=None):
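    """Entry point: build the dataset, backbone, NP head and model, then run the training loop."""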
    lecun_fix()
    if args is None:
        args = parse_args()
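
    # Work around mkl-service / MKL threading-layer conflicts between NumPy and PyTorch.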
os.putenv("MKL_SERVICE_FORCE_INTEL", "1")
os.putenv("NPY_MKL_FORCE_INTEL", "1")
# Add uuid, timestamp and hostname for logging
args.conf_jobnum = str(uuid.uuid4())
args.conf_timestamp = str(datetime.datetime.now())
args.conf_host = socket.gethostname()
dataset = get_dataset(args)
print_args_settings(args, dataset)
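
    # Fill in dataset-provided defaults for anything not set on the command line.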
    if args.n_epochs is None and isinstance(dataset, ContinualDataset):
        args.n_epochs = dataset.get_epochs()
    if args.batch_size is None:
        args.batch_size = dataset.get_batch_size()
    if hasattr(importlib.import_module('models.' + args.model), 'Buffer') and args.minibatch_size is None:
        args.minibatch_size = dataset.get_minibatch_size()
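
    # Assemble the backbone, the optional NP head (an empty --np_type disables it), the loss and the model.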
    backbone = dataset.get_backbone()
    np_head = dataset.get_np_head(args.np_type, args.test_oracle_npcl) if len(args.np_type) else None
    loss = dataset.get_loss(args.np_type)
    model = get_model(args, np_head, backbone, loss, dataset.get_transform())
    # pytorch_total_params = sum(p.numel() for p in model.net.parameters())

    if args.distributed == 'dp':
        model.net = make_dp(model.net)
        model.to('cuda:0')
        args.conf_ngpus = torch.cuda.device_count()
    elif args.distributed == 'ddp':
        # DDP breaks the buffer; it would have to be synchronized.
        raise NotImplementedError('Distributed Data Parallel not supported yet.')

    if args.debug_mode:
        args.nowand = 1

    # set job name
    setproctitle.setproctitle('{}_{}_{}'.format(args.model, args.buffer_size if 'buffer_size' in args else 0, args.dataset))
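
    # ContinualDataset instances follow the task-based protocol; anything else falls back to the general continual trainer.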
    if isinstance(dataset, ContinualDataset):
        train(model, dataset, args)
    else:
        assert not hasattr(model, 'end_task') or model.NAME == 'joint_gcl'
        ctrain(args)


if __name__ == '__main__':
    main()