@@ -35,19 +35,19 @@ def setup_common_training_handlers(
     trainer: Engine,
     train_sampler: Optional[DistributedSampler] = None,
     to_save: Optional[Dict[str, Any]] = None,
-    save_every_iters: Optional[int] = 1000,
+    save_every_iters: int = 1000,
     output_path: Optional[str] = None,
     lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
-    with_gpu_stats: Optional[bool] = False,
+    with_gpu_stats: bool = False,
     output_names: Optional[Iterable[str]] = None,
-    with_pbars: Optional[bool] = True,
-    with_pbar_on_iters: Optional[bool] = True,
-    log_every_iters: Optional[int] = 100,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
     device: Optional[Union[str, torch.device]] = None,
-    stop_on_nan: Optional[bool] = True,
-    clear_cuda_cache: Optional[bool] = True,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Mapping,
+    **kwargs: Any,
 ):
     """Helper method to setup trainer with common handlers (it also supports distributed configuration):
         - :class:`~ignite.handlers.TerminateOnNan`
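
Note on the recurring ``Optional`` fix above: in ``typing``, ``Optional[T]`` means "``T`` or ``None``", not "this parameter has a default". None of these parameters accepts ``None``, so the bare types are the correct annotation. A minimal sketch (hypothetical code, not from this PR) of what the old spelling implied to a type checker:

    # Hypothetical minimal example (not from this PR) showing what the old
    # annotation implied: Optional[int] means "int or None", so a type
    # checker must assume the value can be None even with a non-None default.
    from typing import Optional

    def old(save_every_iters: Optional[int] = 1000) -> int:
        return save_every_iters * 2  # mypy: operand may be "None"

    def new(save_every_iters: int = 1000) -> int:
        return save_every_iters * 2  # OK: the value is always an int
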
@@ -126,18 +126,18 @@ class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Check
 def _setup_common_training_handlers(
     trainer: Engine,
     to_save: Optional[Dict[str, Any]] = None,
-    save_every_iters: Optional[int] = 1000,
+    save_every_iters: int = 1000,
     output_path: Optional[str] = None,
     lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
-    with_gpu_stats: Optional[bool] = False,
+    with_gpu_stats: bool = False,
     output_names: Optional[Iterable[str]] = None,
-    with_pbars: Optional[bool] = True,
-    with_pbar_on_iters: Optional[bool] = True,
-    log_every_iters: Optional[int] = 100,
-    stop_on_nan: Optional[bool] = True,
-    clear_cuda_cache: Optional[bool] = True,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Mapping,
+    **kwargs: Any,
 ):
     if output_path is not None and save_handler is not None:
         raise ValueError(
@@ -208,18 +208,18 @@ def _setup_common_distrib_training_handlers(
     trainer: Engine,
     train_sampler: Optional[DistributedSampler] = None,
     to_save: Optional[Dict[str, Any]] = None,
-    save_every_iters: Optional[int] = 1000,
+    save_every_iters: int = 1000,
     output_path: Optional[str] = None,
     lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
-    with_gpu_stats: Optional[bool] = False,
+    with_gpu_stats: bool = False,
     output_names: Optional[Iterable[str]] = None,
-    with_pbars: Optional[bool] = True,
-    with_pbar_on_iters: Optional[bool] = True,
-    log_every_iters: Optional[int] = 100,
-    stop_on_nan: Optional[bool] = True,
-    clear_cuda_cache: Optional[bool] = True,
+    with_pbars: bool = True,
+    with_pbar_on_iters: bool = True,
+    log_every_iters: int = 100,
+    stop_on_nan: bool = True,
+    clear_cuda_cache: bool = True,
     save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
-    **kwargs: Mapping,
+    **kwargs: Any,
 ):

     _setup_common_training_handlers(
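
The ``**kwargs: Mapping`` to ``**kwargs: Any`` change in each signature follows PEP 484: the annotation on ``**kwargs`` types each individual keyword value, not the kwargs dict itself, so the old spelling claimed every extra keyword value was a ``Mapping``. A hedged sketch of the difference (names invented for illustration, not ignite code):

    # Hypothetical sketch (names invented, not ignite code). Per PEP 484,
    # the annotation on **kwargs applies to each keyword VALUE, so
    # "**kwargs: Mapping" claimed every extra keyword value is a Mapping.
    from typing import Any, Mapping

    def old_style(**kwargs: Mapping) -> None: ...
    def new_style(**kwargs: Any) -> None: ...

    old_style(log_every_iters=100)  # mypy: "int" is not a "Mapping"
    new_style(log_every_iters=100)  # OK: each value is typed as Any
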
@@ -265,9 +265,9 @@ def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, lo
 def _setup_logging(
     logger: BaseLogger,
     trainer: Engine,
-    optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]],
-    evaluators: Optional[Union[Engine, Dict[str, Engine]]],
-    log_every_iters: Optional[int],
+    optimizers: Union[Optimizer, Dict[str, Optimizer]],
+    evaluators: Union[Engine, Dict[str, Engine]],
+    log_every_iters: int,
 ):
     if optimizers is not None:
         if not isinstance(optimizers, (Optimizer, Mapping)):
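
Unlike the defaulted parameters above, ``_setup_logging``'s parameters carry no defaults, so dropping ``Optional`` here tightens the caller contract rather than only cleaning the annotation: explicitly passing ``None`` becomes a type error. A small sketch under that reading (hypothetical names):

    # Hypothetical sketch of the tightened contract for parameters
    # WITHOUT defaults: callers can no longer pass None explicitly.
    from typing import Optional

    def _old(log_every_iters: Optional[int]) -> None: ...
    def _new(log_every_iters: int) -> None: ...

    _old(None)  # accepted under the old annotation
    _new(None)  # mypy error: "None" is not an "int"
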
@@ -312,8 +312,8 @@ def setup_tb_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -343,8 +343,8 @@ def setup_visdom_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -373,8 +373,8 @@ def setup_mlflow_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -403,8 +403,8 @@ def setup_neptune_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup Neptune logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -433,8 +433,8 @@ def setup_wandb_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -463,8 +463,8 @@ def setup_plx_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup Polyaxon logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -493,8 +493,8 @@ def setup_trains_logging(
     trainer: Engine,
     optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None,
     evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None,
-    log_every_iters: Optional[int] = 100,
-    **kwargs: Mapping,
+    log_every_iters: int = 100,
+    **kwargs: Any,
 ):
     """Method to setup Trains logging on trainer and a list of evaluators. Logged metrics are:
         - Training metrics, e.g. running average loss values
@@ -532,10 +532,10 @@ def gen_save_best_models_by_val_score(
     evaluator: Engine,
     models: torch.nn.Module,
     metric_name: str,
-    n_saved: Optional[int] = 3,
+    n_saved: int = 3,
     trainer: Optional[Engine] = None,
-    tag: Optional[str] = "val",
-    **kwargs: Mapping,
+    tag: str = "val",
+    **kwargs: Any,
 ):
     """Method adds a handler to ``evaluator`` to save ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).
@@ -590,10 +590,10 @@ def save_best_model_by_val_score(
     evaluator: Engine,
     model: torch.nn.Module,
     metric_name: str,
-    n_saved: Optional[int] = 3,
+    n_saved: int = 3,
     trainer: Optional[Engine] = None,
-    tag: Optional[str] = "val",
-    **kwargs: Mapping,
+    tag: str = "val",
+    **kwargs: Any,
 ):
     """Method adds a handler to ``evaluator`` to save on a disk ``n_saved`` of best models based on the metric
     (named by ``metric_name``) provided by ``evaluator`` (i.e. ``evaluator.state.metrics[metric_name]``).