From bceca7f87723d9f5da7b2bd2ab0f5b322accc67a Mon Sep 17 00:00:00 2001
From: Shaoshuai Shi
Date: Mon, 27 Dec 2021 01:04:54 +0100
Subject: [PATCH] bugfixed: stuck when training with dist_train.sh

---
 pcdet/utils/common_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pcdet/utils/common_utils.py b/pcdet/utils/common_utils.py
index 0ad902149..837c34af2 100644
--- a/pcdet/utils/common_utils.py
+++ b/pcdet/utils/common_utils.py
@@ -166,9 +166,9 @@ def init_dist_pytorch(tcp_port, local_rank, backend='nccl'):
     torch.cuda.set_device(local_rank % num_gpus)
     dist.init_process_group(
         backend=backend,
-        init_method='tcp://127.0.0.1:%d' % tcp_port,
-        rank=local_rank,
-        world_size=num_gpus
+        # init_method='tcp://127.0.0.1:%d' % tcp_port,
+        # rank=local_rank,
+        # world_size=num_gpus
     )
     rank = dist.get_rank()
     return num_gpus, rank
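
Note on the change: with init_method, rank and world_size commented out, torch.distributed falls back to its default 'env://' initialization and reads MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE from the environment, which the process launcher behind dist_train.sh (torch.distributed.launch / torchrun) exports for every worker. The previously hard-coded tcp://127.0.0.1 rendezvous with rank=local_rank and world_size=num_gpus only describes a single node and can disagree with what the launcher has already set up, which is the likely reason the processes were stuck waiting for each other. Below is a minimal sketch of the patched helper as it reads after this hunk; the num_gpus assignment via torch.cuda.device_count() is outside the hunk and assumed from the surrounding file.

    import torch
    import torch.distributed as dist

    def init_dist_pytorch(tcp_port, local_rank, backend='nccl'):
        # Assumed from the surrounding file (not shown in the hunk above):
        num_gpus = torch.cuda.device_count()
        torch.cuda.set_device(local_rank % num_gpus)
        # With no init_method/rank/world_size given, init_process_group()
        # defaults to 'env://' and picks up MASTER_ADDR, MASTER_PORT, RANK
        # and WORLD_SIZE exported by the launcher, so tcp_port is effectively
        # unused after the patch.
        dist.init_process_group(backend=backend)
        rank = dist.get_rank()
        return num_gpus, rank

For example, a launch along the lines of `torchrun --nproc_per_node=<N> train.py ...` (the exact invocation in dist_train.sh may differ) exports those variables for each spawned process, so every worker agrees on the same rendezvous and the process group initializes instead of blocking.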