# Config for multi-device full finetuning in full_finetune_distributed.py
# using a Llama3.1 70B Instruct model
#
# This config assumes that you've run the following command before launching
# this run:
#   tune download meta-llama/Meta-Llama-3.1-70B-Instruct --output-dir /tmp/Meta-Llama-3.1-70B-Instruct --ignore-patterns "original/consolidated*"
#
# To launch on 8 devices, run the following command from root:
#   tune run --nproc_per_node 8 full_finetune_distributed --config llama3_1/70B_full
#
# You can add specific overrides through the command line. For example,
# to override the checkpointer directory while launching training
# you can run:
#   tune run --nproc_per_node 8 full_finetune_distributed --config llama3_1/70B_full checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
#
# This config is only tested on an 8xA100 machine.

# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_tokenizer
  path: ${oc.env:ARTIFACT_LOCATION}/Meta-Llama-3.1-70B-Instruct/original/tokenizer.model
  max_seq_len: 4096

# Dataset
dataset:
  _component_: torchtune.datasets.alpaca_dataset
  packed: True  # True increases speed
seed: null
shuffle: True

# Model Arguments
model:
  _component_: torchtune.models.llama3_1.llama3_1_70b

checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: ${oc.env:ARTIFACT_LOCATION}/Meta-Llama-3.1-70B-Instruct
  checkpoint_files: [
    model-00001-of-00030.safetensors,
    model-00002-of-00030.safetensors,
    model-00003-of-00030.safetensors,
    model-00004-of-00030.safetensors,
    model-00005-of-00030.safetensors,
    model-00006-of-00030.safetensors,
    model-00007-of-00030.safetensors,
    model-00008-of-00030.safetensors,
    model-00009-of-00030.safetensors,
    model-00010-of-00030.safetensors,
    model-00011-of-00030.safetensors,
    model-00012-of-00030.safetensors,
    model-00013-of-00030.safetensors,
    model-00014-of-00030.safetensors,
    model-00015-of-00030.safetensors,
    model-00016-of-00030.safetensors,
    model-00017-of-00030.safetensors,
    model-00018-of-00030.safetensors,
    model-00019-of-00030.safetensors,
    model-00020-of-00030.safetensors,
    model-00021-of-00030.safetensors,
    model-00022-of-00030.safetensors,
    model-00023-of-00030.safetensors,
    model-00024-of-00030.safetensors,
    model-00025-of-00030.safetensors,
    model-00026-of-00030.safetensors,
    model-00027-of-00030.safetensors,
    model-00028-of-00030.safetensors,
    model-00029-of-00030.safetensors,
    model-00030-of-00030.safetensors,
  ]
  recipe_checkpoint: null
  output_dir: ${oc.env:ARTIFACT_LOCATION}/${oc.env:EXPERIMENT_NAME}/${oc.env:RUN_NAME}/checkpoints
  model_type: LLAMA3
resume_from_checkpoint: False

# Fine-tuning arguments
batch_size: 2
epochs: 1

optimizer:
  _component_: torch.optim.AdamW
  lr: 2e-5
  # Note: highly recommended to use the fused=True optimizer flag
  # with CPU offload for a faster optimizer step.
  fused: True

loss:
  _component_: torchtune.modules.loss.CEWithChunkedOutputLoss

max_steps_per_epoch: null
gradient_accumulation_steps: 1  # Use to increase effective batch size

# Training env
device: cuda

# Memory management
enable_activation_checkpointing: True  # True reduces memory
enable_activation_offloading: True  # True reduces memory
custom_sharded_layers: ['tok_embeddings', 'output']  # Layers to shard separately (useful for large vocab size models). Lower memory, but lower speed.
fsdp_cpu_offload: True
compile: True  # pytorch compile, set to True for better perf/memory
optimizer_in_bwd: False  # True saves memory. Requires gradient_accumulation_steps=1

# Reduced precision
dtype: bf16

# Logging
metric_logger:
  # _component_: torchtune.training.metric_logging.DiskLogger
  _component_: custom_utils.MLflowLogger
  log_dir: ${output_dir}  # Only used for MLflow
  experiment_name: ${oc.env:EXPERIMENT_NAME}  # Set experiment name
  run_name: ${oc.env:RUN_NAME}  # Set run name
  autolog: True  # Set to False to disable autologging
output_dir: ${oc.env:ARTIFACT_LOCATION}/${oc.env:EXPERIMENT_NAME}/${oc.env:RUN_NAME}
log_every_n_steps: 1
log_peak_memory_stats: True

# Profiler (enabled)
profiler:
  _component_: torchtune.training.setup_torch_profiler
  enabled: True

  # Output directory of trace artifacts
  output_dir: ${output_dir}/profiling_outputs

  # `torch.profiler.ProfilerActivity` types to trace
  cpu: True
  cuda: True

  # Trace options passed to `torch.profiler.profile`
  profile_memory: True
  with_stack: False
  record_shapes: True
  with_flops: False

  # `torch.profiler.schedule` options:
  # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
  wait_steps: 5
  warmup_steps: 3
  active_steps: 2
  num_cycles: 1