From a850ec54357b0e7151c569bf0d5268c3ca0b1ce2 Mon Sep 17 00:00:00 2001
From: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com>
Date: Wed, 6 Sep 2023 09:32:07 -0700
Subject: [PATCH] fixed trainer.strategy=auto from None. (#7369)

Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com>
---
 tutorials/tts/FastPitch_Finetuning.ipynb | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tutorials/tts/FastPitch_Finetuning.ipynb b/tutorials/tts/FastPitch_Finetuning.ipynb
index 794d4b71ff44..e0c34b3c0de5 100755
--- a/tutorials/tts/FastPitch_Finetuning.ipynb
+++ b/tutorials/tts/FastPitch_Finetuning.ipynb
@@ -294,7 +294,7 @@
     " model.train_ds.dataloader_params.batch_size=24 model.validation_ds.dataloader_params.batch_size=24 \\\n",
     " model.n_speakers=1 model.pitch_mean=152.3 model.pitch_std=64.0 \\\n",
     " model.pitch_fmin=30 model.pitch_fmax=512 model.optim.lr=2e-4 \\\n",
-    " ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null \\\n",
+    " ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=auto \\\n",
     " +model.text_tokenizer.add_blank_at=true \\\n",
     ")"
    ]
@@ -347,10 +347,10 @@
     " * We use a fixed learning rate of 2e-4.\n",
     " * We switch from the lamb optimizer to the adam optimizer.\n",
     "\n",
-    "* `trainer.devices=1 trainer.strategy=null`\n",
+    "* `trainer.devices=1 trainer.strategy=auto`\n",
     " * For this notebook, we default to 1 gpu which means that we do not need ddp.\n",
     " * If you have the compute resources, feel free to scale this up to the number of free gpus you have available.\n",
-    " * Please remove the `trainer.strategy=null` section if you intend on multi-gpu training."
+    " * Please remove the `trainer.strategy=auto` section if you intend on multi-gpu training."
    ]
   },
  {