From 1debf48906d8b32bc0181e68ac1e6abbddd05430 Mon Sep 17 00:00:00 2001 From: Alan Blanchet Date: Tue, 2 Apr 2024 16:47:37 +0200 Subject: [PATCH] doc: using collate_fn collate_fn isn't set by default. We need to use PyTorch's default collator. --- docs/en/user_guides/config.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/en/user_guides/config.md b/docs/en/user_guides/config.md index 69bd91194e0..ac5eb19821f 100644 --- a/docs/en/user_guides/config.md +++ b/docs/en/user_guides/config.md @@ -207,6 +207,7 @@ train_dataloader = dict( # Train dataloader config batch_size=2, # Batch size of a single GPU num_workers=2, # Worker to pre-fetch data for each single GPU persistent_workers=True, # If ``True``, the dataloader will not shut down the worker processes after an epoch end, which can accelerate training speed. + collate_fn=dict(type='default_collate'), # Required to collate a list of tensors into a tensor with a batch dimension sampler=dict( # training data sampler type='DefaultSampler', # DefaultSampler which supports both distributed and non-distributed training. Refer to https://mmengine.readthedocs.io/en/latest/api/generated/mmengine.dataset.DefaultSampler.html#mmengine.dataset.DefaultSampler shuffle=True), # randomly shuffle the training data in each epoch