[AutoParallel] Refactor llama3.1 model in intermediate api #2859
base: develop
File 1 of 3: pretraining entry script (filename not shown in this view).

```
@@ -27,12 +27,12 @@
from paddleformers.trainer.trainer import Trainer
from paddleformers.trainer.trainer_utils import set_seed
from paddleformers.transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoModelForCausalLMPipe,
    AutoTokenizer,
    CosineAnnealingWithWarmupDecay,
    LinearAnnealingWithWarmupDecay,
    LlamaConfig,
    LlamaForCausalLMNet,
    LlamaPretrainingCriterionNet,
)
from paddleformers.transformers.configuration_utils import LlmMetaConfig
from paddleformers.utils.log import logger
```
```
@@ -145,7 +145,6 @@ def __init__(self, *args, **kwargs):

def run_auto_parallel(model_args, data_args, generating_args, training_args):
    do_enable_linear_fused_grad_add = training_args.enable_linear_fused_grad_add
    # do_enable_mp_async_allreduce = (
    #     training_args.enable_auto_parallel
```
```
@@ -203,14 +202,8 @@ def run_auto_parallel(model_args, data_args, generating_args, training_args):
            "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
        )

    # TODO: only support llama model now
    config_class = LlamaConfig
    model_class = LlamaForCausalLMNet
    criterion_class = LlamaPretrainingCriterionNet

    config = config_class.from_pretrained(model_args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name_or_path)
    # config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    LlmMetaConfig.set_llm_config(config, training_args)
    config.use_fast_layer_norm = model_args.use_fast_layer_norm
```
```
@@ -276,6 +269,13 @@
    if training_args.no_recompute_layers is not None:
        training_args.no_recompute_layers.sort()

    if training_args.use_intermediate_api:
        config.run_single_model = True
        config.tensor_parallel_degree = 1
        config.sharding_parallel_degree = 1
        config.sep_parallel_degree = 1
        config.context_parallel_degree = 1

    print("Final pre-training config:", config)

    # Set the dtype for loading model
```
```
@@ -286,9 +286,41 @@ def run_auto_parallel(model_args, data_args, generating_args, training_args):
    if training_args.bf16:
        dtype = "bfloat16"

    with paddle.LazyGuard():
        model = model_class.from_config(config, dtype=dtype)
        criterion = criterion_class(config)
    model_class = AutoModelForCausalLM

    if not training_args.enable_auto_parallel and training_args.pipeline_parallel_degree > 1:
        model_class = AutoModelForCausalLMPipe
        if "LLama" in str(config.architectures):
            try:
                from utils.register_reshard import register_pp_reshard_information

                register_pp_reshard_information(config.num_hidden_layers)
            except:
                print("Not register llama pp reshard information.")

    architectures_to_check = {"Qwen2Moe", "DeepseekV2", "DeepseekV3"}
    if (
        any(architecture in str(config.architectures) for architecture in architectures_to_check)
        and training_args.data_parallel_degree > 1
    ):
        training_args.use_expert_parallel = True
```
> Collaborator: Is EP (expert parallel) allowed in single-card mode?
```
    if model_args.continue_training:
        # NOTE(gongenlei): new add
```
> Collaborator: Remove the NOTE.
```
        if training_args.autotuner_benchmark:
            model = model_class.from_config(config, dtype=dtype)
        else:
            model = model_class.from_pretrained(
```
> Collaborator: This is not needed for warm start; refer to the code below.
```
                model_args.model_name_or_path,
                config=config,
                dtype=dtype,
            )
    else:
        if training_args.enable_auto_parallel:
            with paddle.LazyGuard():
                model = model_class.from_config(config, dtype=dtype)
        else:
            model = model_class.from_config(config, dtype=dtype)

    if training_args.recompute:
```
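As context for the `paddle.LazyGuard()` branches above, here is a minimal sketch of the lazy-construction path. It is not taken from this PR: the checkpoint name is a placeholder, and describing `LazyGuard` as delayed parameter initialization is an assumption to verify against the Paddle docs.

```python
import paddle
from paddleformers.transformers import AutoConfig, AutoModelForCausalLM

# Placeholder checkpoint name; any causal-LM config that paddleformers can resolve works here.
name = "meta-llama/Meta-Llama-3.1-8B"
config = AutoConfig.from_pretrained(name)

# Under LazyGuard the network structure is built without initializing parameter storage
# up front, which is what the auto-parallel path wants before weights are sharded and placed.
with paddle.LazyGuard():
    model = AutoModelForCausalLM.from_config(config, dtype="bfloat16")
```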
```
@@ -344,7 +376,6 @@ def fn(layer):
    trainer = PretrainingTrainer(
        model=model,
        criterion=criterion,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset if training_args.do_train else None,
```
File 2 of 3: `PretrainedConfig` (configuration utilities).
```
@@ -537,6 +537,9 @@ class PretrainedConfig:
            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
            model has a output word embedding layer.

        run_single_model (`bool`, *optional*, defaults to `False`):
```
> Collaborator: If this is meant to express a non-parallel mode, the name does not convey that. Consider renaming it so developers can understand it more easily, for example:
```
            Whether to run the model in single card mode. When enabled, all parallel degree configurations will be disabled.

        dtype (`str`, *optional*):
            The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`
            (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved
```
```
@@ -601,6 +604,13 @@ def __init__(self, **kwargs):
        self.use_cache = kwargs.pop("use_cache", False)
        self.tie_word_embeddings = kwargs.pop("tie_word_embeddings", True)

        # for run model in single card mode
        self.run_single_model = kwargs.pop("run_single_model", False)
        if self.run_single_model:
            self.tensor_parallel_degree = 1
            self.sep_parallel_degree = 1
            self.context_parallel_degree = 1

        # for transformers fuse
        self.fuse_linear = kwargs.pop("fuse_linear", False)
        self.fuse_attention_qkv = kwargs.pop("fuse_attention_qkv", False)
```
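A small usage sketch of the new `run_single_model` flag, not part of the diff. Whether the override survives depends on where the parallel degrees are popped later in `__init__`, so treat the expected values as an assumption to verify.

```python
from paddleformers.transformers import LlamaConfig

# Hypothetical example: run_single_model is expected to collapse the parallel degrees to 1,
# even if a different tensor_parallel_degree is passed in.
config = LlamaConfig(run_single_model=True, tensor_parallel_degree=8)
print(config.tensor_parallel_degree)   # expected: 1
print(config.sep_parallel_degree)      # expected: 1
print(config.context_parallel_degree)  # expected: 1
```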
File 3 of 3: new file defining the tensor-parallel plan for the Llama network.

```
@@ -0,0 +1,40 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.distributed as dist


def get_dist_config(model, prefix=""):
    """Generate distributed configuration for Llama model"""
    if prefix != "":
        assert prefix.endswith(".")

    config = {
        "mp_config": {
            "parallelize_plan": {
                f"{prefix}llama.embed_tokens": dist.ColWiseParallel(gather_output=True),
                f"{prefix}llama.layers.*.self_attn.qkv_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.self_attn.q_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.self_attn.k_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.self_attn.v_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.self_attn.o_proj": dist.RowWiseParallel(),
                f"{prefix}llama.layers.*.mlp.gate_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.mlp.up_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.mlp.gate_up_fused_proj": dist.ColWiseParallel(),
                f"{prefix}llama.layers.*.mlp.down_proj": dist.RowWiseParallel(),
                f"{prefix}lm_head.weight": dist.ColWiseParallel(),
            }
        },
    }
    return config
```
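A hedged usage sketch for `get_dist_config`. The `mp_config`/`parallelize_plan` layout is the shape consumed by Paddle's intermediate auto-parallel API; the import path below and the `paddle.distributed.parallelize` call in the final comment are assumptions, not something this diff shows.

```python
# Hypothetical module name for the new file above; the real path in the repo may differ.
from llama_parallel_plan import get_dist_config

# `model` is not used by get_dist_config as written, so the plan can be inspected on its own.
plan = get_dist_config(model=None, prefix="")
for layer_pattern, placement in plan["mp_config"]["parallelize_plan"].items():
    print(layer_pattern, type(placement).__name__)  # e.g. ColWiseParallel / RowWiseParallel

# To apply it, the plan would be handed to the intermediate API, roughly (assumed signature):
#   model, optimizer = paddle.distributed.parallelize(model, optimizer, config=plan)
```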
> Review comment (on `register_pp_reshard_information`): Under what circumstances would this fail? What is the impact of not registering this reshard information?