Skip to content

Commit

Permalink
add finetune scripts
Browse files Browse the repository at this point in the history
  • Loading branch information
Manchery committed Jan 2, 2025
1 parent ee752be commit 71bec00
Show file tree
Hide file tree
Showing 7 changed files with 106 additions and 6 deletions.
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,8 @@ accelerate launch train_tokenizer.py \
--train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--rand_select --video_stepsize 1 --segment_horizon 16 --segment_length 8 --context_length 1 \
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
--max_train_steps 200005
```

### Finetuning Transformer
Expand All @@ -120,11 +121,14 @@ accelerate launch train_gpt.py \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--video_stepsize 1 --segment_length 16 --context_length 1 \
--use_eval_dataset --use_fvd --use_frame_metrics \
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
--max_train_steps 100005
```

For action-free video prediction, remove `--load_internal_llm --action_conditioned`.

See more scripts at [`scripts/finetune`](/scripts/finetune).

### Evaluation

To evaluate the checkpoints only, run:
Expand Down
6 changes: 4 additions & 2 deletions scripts/finetune/bair-64-act-cond.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@ accelerate launch train_tokenizer.py \
--train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--rand_select --video_stepsize 1 --segment_horizon 16 --segment_length 8 --context_length 1 \
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
--max_train_steps 200005


accelerate launch train_gpt.py \
Expand All @@ -18,4 +19,5 @@ accelerate launch train_gpt.py \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--video_stepsize 1 --segment_length 16 --context_length 1 \
--use_eval_dataset --use_fvd --use_frame_metrics \
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
--max_train_steps 100005
6 changes: 4 additions & 2 deletions scripts/finetune/bair-64-act-free.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@ accelerate launch train_tokenizer.py \
--train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--rand_select --video_stepsize 1 --segment_horizon 16 --segment_length 8 --context_length 1 \
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer
--pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
--max_train_steps 200005


accelerate launch train_gpt.py \
Expand All @@ -18,4 +19,5 @@ accelerate launch train_gpt.py \
--oxe_data_mixes_type bair --resolution 64 --dataloader_num_workers 16 \
--video_stepsize 1 --segment_length 16 --context_length 1 \
--use_eval_dataset --use_fvd --use_frame_metrics \
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd
--weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
--max_train_steps 70005
23 changes: 23 additions & 0 deletions scripts/finetune/robonet-256-act-cond.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Stage 1: finetune the context-aware VQGAN tokenizer (ctx_vqgan) on RoboNet
# at 256x256 resolution, initializing from the pretrained OXE 256 action-free
# tokenizer. Runs for 200005 steps; `--disc_start 1000005` keeps the
# discriminator disabled for the whole run (start step exceeds total steps).
# NOTE(review): batch size 2 with 4 gradient-accumulation steps — presumably
# shrunk for the 256-res memory footprint vs. the 64-res scripts; confirm.
accelerate launch train_tokenizer.py \
    --exp_name robonet_tokenizer_ft --output_dir log_vqgan --seed 0 --mixed_precision bf16 \
    --model_type ctx_vqgan \
    --train_batch_size 2 --gradient_accumulation_steps 4 --disc_start 1000005 \
    --oxe_data_mixes_type tfds_robonet --resolution 256 --dataloader_num_workers 16 \
    --rand_select --video_stepsize 1 --segment_horizon 12 --segment_length 8 --context_length 2 --no_aug \
    --pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-256-act-free/tokenizer \
    --max_train_steps 200005


# Stage 2: finetune the action-conditioned transformer (LLaMA config, 5-dim
# actions) on top of the tokenizer produced above. `{log directory of
# finetuned tokenizer}` is a placeholder — replace it with the stage-1 output
# directory before running.
accelerate launch train_gpt.py \
    --exp_name robonet_llama_ft --output_dir log_trm --seed 0 --mixed_precision bf16 \
    --vqgan_type ctx_vqgan \
    --pretrained_model_name_or_path {log directory of finetuned tokenizer}/unwrapped_model \
    --config_name configs/llama/config.json --load_internal_llm --action_conditioned --action_dim 5 \
    --pretrained_transformer_path pretrained_models/ivideogpt-oxe-256-act-free/transformer \
    --per_device_train_batch_size 4 --gradient_accumulation_steps 2 \
    --learning_rate 1e-4 --lr_scheduler_type cosine \
    --oxe_data_mixes_type tfds_robonet --resolution 256 --dataloader_num_workers 16 \
    --video_stepsize 1 --segment_length 12 --context_length 2 \
    --use_eval_dataset --use_fvd --use_frame_metrics \
    --weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
    --max_train_steps 600005
23 changes: 23 additions & 0 deletions scripts/finetune/robonet-64-act-cond.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Stage 1: finetune the context-aware VQGAN tokenizer (ctx_vqgan) on RoboNet
# at 64x64 resolution, initializing from the pretrained OXE 64 action-free
# tokenizer. `--disc_start 1000005` keeps the discriminator disabled for the
# whole run (start step exceeds total steps).
# NOTE(review): tokenizer runs 600005 steps here, while the other finetune
# scripts use 200005 — presumably intentional for RoboNet-64; confirm.
accelerate launch train_tokenizer.py \
    --exp_name robonet_tokenizer_ft --output_dir log_vqgan --seed 0 --mixed_precision bf16 \
    --model_type ctx_vqgan \
    --train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
    --oxe_data_mixes_type tfds_robonet --resolution 64 --dataloader_num_workers 16 \
    --rand_select --video_stepsize 1 --segment_horizon 12 --segment_length 8 --context_length 2 \
    --pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
    --max_train_steps 600005


# Stage 2: finetune the action-conditioned transformer (LLaMA config, 5-dim
# actions) on top of the tokenizer produced above. `{log directory of
# finetuned tokenizer}` is a placeholder — replace it with the stage-1 output
# directory before running.
accelerate launch train_gpt.py \
    --exp_name robonet_llama_ft --output_dir log_trm --seed 0 --mixed_precision bf16 \
    --vqgan_type ctx_vqgan \
    --pretrained_model_name_or_path {log directory of finetuned tokenizer}/unwrapped_model \
    --config_name configs/llama/config.json --load_internal_llm --action_conditioned --action_dim 5 \
    --pretrained_transformer_path pretrained_models/ivideogpt-oxe-64-act-free/transformer \
    --per_device_train_batch_size 16 --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 --lr_scheduler_type cosine \
    --oxe_data_mixes_type tfds_robonet --resolution 64 --dataloader_num_workers 16 \
    --video_stepsize 1 --segment_length 12 --context_length 2 \
    --use_eval_dataset --use_fvd --use_frame_metrics \
    --weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
    --max_train_steps 600005
23 changes: 23 additions & 0 deletions scripts/finetune/vp2-robodesk.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Stage 1: finetune the context-aware VQGAN tokenizer (ctx_vqgan) on the
# VP2 RoboDesk data mix at 64x64, initializing from the pretrained OXE 64
# action-free tokenizer. Runs 200005 steps; `--disc_start 1000005` keeps the
# discriminator disabled for the whole run (start step exceeds total steps).
accelerate launch train_tokenizer.py \
    --exp_name vp2_robodesk_tokenizer_ft --output_dir log_vqgan --seed 0 --mixed_precision bf16 \
    --model_type ctx_vqgan \
    --train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
    --oxe_data_mixes_type vp2_robodesk --resolution 64 --dataloader_num_workers 16 \
    --rand_select --video_stepsize 1 --segment_horizon 12 --segment_length 8 --context_length 2 \
    --pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
    --max_train_steps 200005


# Stage 2: finetune the action-conditioned transformer (LLaMA config, 5-dim
# actions) on top of the tokenizer produced above. `{log directory of
# finetuned tokenizer}` is a placeholder — replace it with the stage-1 output
# directory before running.
accelerate launch train_gpt.py \
    --exp_name vp2_robodesk_llama_ft --output_dir log_trm --seed 0 --mixed_precision bf16 \
    --vqgan_type ctx_vqgan \
    --pretrained_model_name_or_path {log directory of finetuned tokenizer}/unwrapped_model \
    --config_name configs/llama/config.json --load_internal_llm --action_conditioned --action_dim 5 \
    --pretrained_transformer_path pretrained_models/ivideogpt-oxe-64-act-free/transformer \
    --per_device_train_batch_size 16 --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 --lr_scheduler_type cosine \
    --oxe_data_mixes_type vp2_robodesk --resolution 64 --dataloader_num_workers 16 \
    --video_stepsize 1 --segment_length 12 --context_length 2 \
    --use_eval_dataset --use_fvd --use_frame_metrics \
    --weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
    --max_train_steps 200005
23 changes: 23 additions & 0 deletions scripts/finetune/vp2-robosuite.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Stage 1: finetune the context-aware VQGAN tokenizer (ctx_vqgan) on the
# VP2 RoboSuite data mix at 64x64, initializing from the pretrained OXE 64
# action-free tokenizer. Runs 200005 steps; `--disc_start 1000005` keeps the
# discriminator disabled for the whole run (start step exceeds total steps).
accelerate launch train_tokenizer.py \
    --exp_name vp2_robosuite_tokenizer_ft --output_dir log_vqgan --seed 0 --mixed_precision bf16 \
    --model_type ctx_vqgan \
    --train_batch_size 16 --gradient_accumulation_steps 1 --disc_start 1000005 \
    --oxe_data_mixes_type vp2_robosuite --resolution 64 --dataloader_num_workers 16 \
    --rand_select --video_stepsize 1 --segment_horizon 12 --segment_length 8 --context_length 2 \
    --pretrained_model_name_or_path pretrained_models/ivideogpt-oxe-64-act-free/tokenizer \
    --max_train_steps 200005


# Stage 2: finetune the action-conditioned transformer on top of the tokenizer
# produced above. Note `--action_dim 4` here (vs. 5 in the other scripts) —
# matches the RoboSuite action space. `{log directory of finetuned tokenizer}`
# is a placeholder — replace it with the stage-1 output directory before
# running.
accelerate launch train_gpt.py \
    --exp_name vp2_robosuite_llama_ft --output_dir log_trm --seed 0 --mixed_precision bf16 \
    --vqgan_type ctx_vqgan \
    --pretrained_model_name_or_path {log directory of finetuned tokenizer}/unwrapped_model \
    --config_name configs/llama/config.json --load_internal_llm --action_conditioned --action_dim 4 \
    --pretrained_transformer_path pretrained_models/ivideogpt-oxe-64-act-free/transformer \
    --per_device_train_batch_size 16 --gradient_accumulation_steps 1 \
    --learning_rate 1e-4 --lr_scheduler_type cosine \
    --oxe_data_mixes_type vp2_robosuite --resolution 64 --dataloader_num_workers 16 \
    --video_stepsize 1 --segment_length 12 --context_length 2 \
    --use_eval_dataset --use_fvd --use_frame_metrics \
    --weight_decay 0.01 --llama_attn_drop 0.1 --embed_no_wd \
    --max_train_steps 200005

0 comments on commit 71bec00

Please sign in to comment.