1 parent a848347 commit ec8a172
imagenet/main.py
@@ -106,12 +106,8 @@ def main():
 
     if torch.cuda.is_available():
         ngpus_per_node = torch.cuda.device_count()
-        assert not (ngpus_per_node == 1 and args.dist_backend == "nccl"),\
-            "nccl backend requires GPU count>1, see https://github.com/NVIDIA/nccl/issues/103 perhaps use 'gloo'"
     else:
-        ngpus_per_node = 0
-        assert args.dist_backend != "nccl",\
-            "nccl backend does not work without GPU, see https://pytorch.org/docs/stable/distributed.html"
+        ngpus_per_node = 1
     if args.multiprocessing_distributed:
         # Since we have ngpus_per_node processes per node, the total world_size
         # needs to be adjusted accordingly
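
The hunk drops the nccl-specific asserts and makes a CPU-only host count as a single device, so the multiprocessing-distributed path still spawns exactly one worker. Below is a minimal sketch of how ngpus_per_node typically feeds the world-size adjustment and the spawn call that the comment refers to; the launch/main_worker helpers and the args fields used here (multiprocessing_distributed, world_size, gpu) are assumptions for illustration, not part of the diff shown above.

import torch
import torch.multiprocessing as mp


def main_worker(gpu, ngpus_per_node, args):
    # Placeholder worker: in the real script this would set up the process
    # group, build the model, and run training on one device.
    print(f"worker {gpu} of {ngpus_per_node} started")


def launch(args):
    if torch.cuda.is_available():
        ngpus_per_node = torch.cuda.device_count()
    else:
        # As in the change above: a CPU-only host is treated as one "device"
        # so the distributed launch below still starts a single process.
        ngpus_per_node = 1

    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total
        # world_size needs to be adjusted accordingly.
        args.world_size = ngpus_per_node * args.world_size
        # torch.multiprocessing.spawn passes the process index as the first
        # argument to main_worker.
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)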