forked from pesser/stable-diffusion

Showing 4 changed files with 118 additions and 0 deletions: a node launcher and an sbatch script for resuming the 512 improved-aesthetics run, plus the same pair for the 768 LAION-HR run.

@@ -0,0 +1,20 @@
#!/bin/bash
export NODE_RANK=${SLURM_NODEID}
echo "##########################################"
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo NODE_RANK=${NODE_RANK}
echo WORLD_SIZE=${WORLD_SIZE}
echo "##########################################"
# debug environment worked great so we stick with it
# no magic there, just a miniconda python=3.9, pytorch=1.12, cudatoolkit=11.3
# env with pip dependencies from stable diffusion's requirements.txt
eval "$(/fsx/stable-diffusion/debug/miniconda3/bin/conda shell.bash hook)"
conda activate stable
cd /fsx/stable-diffusion/stable-diffusion

CONFIG=configs/stable-diffusion/txt2img-1p4B-multinode-clip-encoder-high-res-512-improvedaesthetic.yaml
EXTRA="model.params.ckpt_path=/fsx/stable-diffusion/stable-diffusion/logs/2022-07-07T16-15-18_txt2img-1p4B-multinode-clip-encoder-high-res-512/checkpoints/last.ckpt"
DEBUG="-d True lightning.callbacks.image_logger.params.batch_frequency=5"

python main.py --base $CONFIG --gpus 0,1,2,3,4,5,6,7 -t --num_nodes ${WORLD_SIZE} --scale_lr False $EXTRA #$DEBUG
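
The launcher above does not set MASTER_ADDR, MASTER_PORT or WORLD_SIZE itself; it expects them from the sbatch environment further down and only derives NODE_RANK from SLURM_NODEID. As a minimal sketch, assuming the same conda env and repository layout, the handoff can be exercised on one machine by faking those variables before calling the launcher (the values and the launcher path are placeholders inferred from the srun line below, not part of this diff):

# hypothetical single-node smoke test of the launcher; all exported values are placeholders
export MASTER_ADDR=127.0.0.1
export MASTER_PORT=11338
export WORLD_SIZE=1
export SLURM_NODEID=0
bash /fsx/stable-diffusion/stable-diffusion/scripts/slurm/resume_512_improvedaesthetic/launcher.sh
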
@@ -0,0 +1,39 @@
#!/bin/bash
#SBATCH --partition=compute-od-gpu
#SBATCH --job-name=stable-diffusion-512cont-improvedaesthetic
#SBATCH --nodes=20
#SBATCH --gpus-per-node=8
#SBATCH --cpus-per-gpu=4
#SBATCH --ntasks-per-node=1
#SBATCH --output=%x_%j.%n.out

# nccl / efa stuff
module load intelmpi
source /opt/intel/mpi/latest/env/vars.sh
export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib64:/usr/local/cuda-11.0/efa/lib:/usr/local/cuda-11.0/lib:/usr/local/cuda-11.0/lib64:/usr/local/cuda-11.0:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib:/opt/aws-ofi-nccl/lib:$LD_LIBRARY_PATH
export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export LD_PRELOAD="/opt/nccl/build/lib/libnccl.so"
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0

# pytorch multinode vars
# node rank should be set in launcher script
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=11338
export WORLD_SIZE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)

echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo WORLD_SIZE=${WORLD_SIZE}

srun --output=%x_%j.%n.out bash /fsx/stable-diffusion/stable-diffusion/scripts/slurm/resume_512_improvedaesthetic/launcher.sh
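
For reference, a job defined like this is submitted with sbatch, and the srun --output pattern produces one %x_%j.%n.out log per node. A short usage sketch, assuming the script above is saved as sbatch.sh next to the launcher (the filename is not recorded in this diff):

# hypothetical submission and monitoring; sbatch.sh is an assumed filename
sbatch scripts/slurm/resume_512_improvedaesthetic/sbatch.sh
squeue -u "$USER"                                            # confirm the 20-node job is pending/running
tail -f stable-diffusion-512cont-improvedaesthetic_*.0.out   # follow the log from node 0
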
@@ -0,0 +1,20 @@
#!/bin/bash
export NODE_RANK=${SLURM_NODEID}
echo "##########################################"
echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo NODE_RANK=${NODE_RANK}
echo WORLD_SIZE=${WORLD_SIZE}
echo "##########################################"
# debug environment worked great so we stick with it
# no magic there, just a miniconda python=3.9, pytorch=1.12, cudatoolkit=11.3
# env with pip dependencies from stable diffusion's requirements.txt
eval "$(/fsx/stable-diffusion/debug/miniconda3/bin/conda shell.bash hook)"
conda activate stable
cd /fsx/stable-diffusion/stable-diffusion

CONFIG=configs/stable-diffusion/txt2img-multinode-clip-encoder-f16-768-laion-hr.yaml
EXTRA="model.params.ckpt_path=/fsx/stable-diffusion/stable-diffusion/checkpoints/f16-33k+12k-hr_pruned.ckpt"
DEBUG="-d True lightning.callbacks.image_logger.params.batch_frequency=5"

python main.py --base $CONFIG --gpus 0,1,2,3,4,5,6,7 -t --num_nodes ${WORLD_SIZE} --scale_lr False $EXTRA #$DEBUG
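
As in the 512 launcher, EXTRA overrides model.params.ckpt_path on the command line and DEBUG stays commented out, so a missing or mistyped checkpoint path only surfaces once main.py starts loading weights. A small optional pre-flight check, sketched against the same path (nothing beyond the launcher's own values is assumed):

# optional pre-flight check: fail fast if the resume checkpoint is missing
CKPT=/fsx/stable-diffusion/stable-diffusion/checkpoints/f16-33k+12k-hr_pruned.ckpt
if [ ! -f "$CKPT" ]; then
    echo "checkpoint not found: $CKPT" >&2
    exit 1
fi
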
@@ -0,0 +1,39 @@
#!/bin/bash
#SBATCH --partition=compute-od-gpu
#SBATCH --job-name=stable-diffusion-768cont-resumehr
#SBATCH --nodes=20
#SBATCH --gpus-per-node=8
#SBATCH --cpus-per-gpu=4
#SBATCH --ntasks-per-node=1
#SBATCH --output=%x_%j.%n.out

# nccl / efa stuff
module load intelmpi
source /opt/intel/mpi/latest/env/vars.sh
export LD_LIBRARY_PATH=/opt/aws-ofi-nccl/lib:/opt/amazon/efa/lib64:/usr/local/cuda-11.0/efa/lib:/usr/local/cuda-11.0/lib:/usr/local/cuda-11.0/lib64:/usr/local/cuda-11.0:/opt/nccl/build/lib:/opt/aws-ofi-nccl-install/lib:/opt/aws-ofi-nccl/lib:$LD_LIBRARY_PATH
export NCCL_PROTO=simple
export PATH=/opt/amazon/efa/bin:$PATH
export LD_PRELOAD="/opt/nccl/build/lib/libnccl.so"
export FI_EFA_FORK_SAFE=1
export FI_LOG_LEVEL=1
export FI_EFA_USE_DEVICE_RDMA=1 # use for p4dn
export NCCL_DEBUG=info
export PYTHONFAULTHANDLER=1
export CUDA_LAUNCH_BLOCKING=0
export OMPI_MCA_mtl_base_verbose=1
export FI_EFA_ENABLE_SHM_TRANSFER=0
export FI_PROVIDER=efa
export FI_EFA_TX_MIN_CREDITS=64
export NCCL_TREE_THRESHOLD=0

# pytorch multinode vars
# node rank should be set in launcher script
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=11338
export WORLD_SIZE=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l)

echo MASTER_ADDR=${MASTER_ADDR}
echo MASTER_PORT=${MASTER_PORT}
echo WORLD_SIZE=${WORLD_SIZE}

srun --output=%x_%j.%n.out bash /fsx/stable-diffusion/stable-diffusion/scripts/slurm/resume_768_hr/launcher.sh # srun vs mpirun?
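
On the open question in the last line (srun vs mpirun): this setup stays with srun, one task per node, and leaves it to the training process on each node to start its per-GPU workers. With NCCL_DEBUG=info and FI_PROVIDER=efa exported above, the per-node logs are also where to confirm that the EFA provider was actually picked up; a rough check, assuming typical NCCL/libfabric log wording (the grep terms are assumptions, not guaranteed log strings):

# hypothetical post-launch check: scan the node logs for EFA / OFI plugin lines
grep -iE "ofi|efa|libfabric" stable-diffusion-768cont-resumehr_*.out | head -n 20
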