Commit

Remove obsolete layer_norm_names parameter and add peft>=0.3.0 to requirements (huggingface#366)

* remove obsolete layer_norm_names parameter

* remove obsolete parameter layer_norm_names and add peft>=0.3.0 to requirements

* make style - oops

* typo
teticio authored May 15, 2023
1 parent a31bad8 commit e547c39
Showing 4 changed files with 2 additions and 5 deletions.
@@ -59,9 +59,7 @@ class DataTrainingArguments:


if "gpt-neox" in model_args.model_name_or_path:
model = prepare_model_for_int8_training(
model, output_embedding_layer_name="embed_out", layer_norm_names=["layer_norm", "layernorm"]
)
model = prepare_model_for_int8_training(model, output_embedding_layer_name="embed_out")
else:
model = prepare_model_for_int8_training(model)
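
As an aside, a minimal sketch of the updated call pattern, assuming peft>=0.3.0 (which, as I understand it, upcasts all non-int8 parameters, layer norms included, so layer_norm_names is no longer needed); the checkpoint name is illustrative:

    from peft import prepare_model_for_int8_training
    from transformers import AutoModelForCausalLM

    # Load a GPT-NeoX checkpoint in 8-bit (illustrative model name).
    model = AutoModelForCausalLM.from_pretrained(
        "EleutherAI/gpt-neox-20b", load_in_8bit=True, device_map="auto"
    )

    # GPT-NeoX names its output embedding "embed_out"; no layer_norm_names needed.
    model = prepare_model_for_int8_training(model, output_embedding_layer_name="embed_out")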

1 change: 0 additions & 1 deletion examples/sentiment/scripts/gpt2-sentiment_peft.py
@@ -152,7 +152,6 @@ def collator(data):
     config.model_name,
     load_in_8bit=True,
     peft_config=lora_config,
-    layer_norm_names=[],
 )

 tokenizer = AutoTokenizer.from_pretrained(config.model_name)
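
For reference, a hedged sketch of the trimmed-down call from the example scripts; "gpt2" and the LoraConfig values below stand in for config.model_name and the script's actual settings:

    from peft import LoraConfig
    from trl import AutoModelForCausalLMWithValueHead

    # Stand-in LoRA settings; the real script defines its own lora_config.
    lora_config = LoraConfig(
        r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM"
    )

    # layer_norm_names=[] is gone; everything else is unchanged.
    model = AutoModelForCausalLMWithValueHead.from_pretrained(
        "gpt2",  # stands in for config.model_name
        load_in_8bit=True,
        peft_config=lora_config,
    )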
1 change: 0 additions & 1 deletion examples/stack_llama/scripts/rl_training.py
@@ -183,7 +183,6 @@ def collator(data):
     load_in_8bit=True,
     device_map={"": current_device},
     peft_config=lora_config,
-    layer_norm_names=[],
 )

 optimizer = None
1 change: 1 addition & 0 deletions requirements.txt
@@ -3,3 +3,4 @@ torch>=1.4.0
 tqdm
 transformers
 accelerate
+peft>=0.3.0
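
With the pin in place, pip install -r requirements.txt pulls in a peft release whose prepare_model_for_int8_training no longer accepts layer_norm_names, which appears to be the motivation for this change. A fail-fast guard for downstream scripts, as a sketch (assumes the packaging library is installed):

    import peft
    from packaging import version

    # Refuse to run against a peft release that still expects layer_norm_names.
    assert version.parse(peft.__version__) >= version.parse("0.3.0"), "peft>=0.3.0 required"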
