first commit

This commit is contained in:
Guocheng Qian
2023-08-02 19:51:43 -07:00
parent c2891c38cc
commit 13e18567fa
202 changed files with 43362 additions and 17 deletions

4
scripts/install_ext.sh Normal file
View File

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build and install the project's custom CUDA/C++ extension packages.
# Run from the repository root. Stops at the first failed install instead
# of silently continuing with a half-built environment.
set -e

pip install ./raymarching
pip install ./shencoder
pip install ./freqencoder
pip install ./gridencoder

View File

@@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Preprocess the example image in every sub-directory of a dataset folder.
# Usage: bash preprocess.sh <topdir> <imagename>
#   topdir    - directory whose sub-directories each hold one example
#   imagename - image file inside each sub-directory (rgba.png or image.png)
topdir=$1
imagename=$2
for i in "$topdir"/*; do
    [ -d "$i" ] || continue   # skip stray files at the top level
    echo "preprocessing $i/$imagename ..."
    python scripts/preprocess_image.py "$i/$imagename"
done

75
scripts/magic123/run_2dprior.sh Executable file
View File

@@ -0,0 +1,75 @@
#!/bin/bash
#SBATCH -N 1
#SBATCH --array=0
#SBATCH -J magic123
#SBATCH -o slurm_logs/%x.%3a.%A.out
#SBATCH -e slurm_logs/%x.%3a.%A.err
#SBATCH --time=3:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --cpus-per-gpu=6
#SBATCH --mem=30G
##SBATCH --gpus=1

# Two-stage Magic123 reconstruction using only the 2D (Stable Diffusion)
# prior with a textual-inversion embedding.
# Usage: run_2dprior.sh <gpu_id> <runid> <runid2> <data_dir> <image_name> <step1> <step2> [extra main.py args...]

module load gcc/7.5.0
#source ~/.bashrc
#source activate magic123
source venv_magic123/bin/activate
which python
nvidia-smi
nvcc --version
hostname
NUM_GPU_AVAILABLE=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
echo "number of gpus:" "$NUM_GPU_AVAILABLE"

RUN_ID=$2      # job name for the first (coarse NeRF) stage
RUN_ID2=$3     # job name for the second (dmtet refinement) stage
DATA_DIR=$4    # directory containing the image and learned_embeds.bin
IMAGE_NAME=$5  # image file name inside DATA_DIR
step1=$6       # non-zero -> run stage 1
step2=$7       # non-zero -> run stage 2
FILENAME=$(basename "$DATA_DIR")
dataset=$(basename "$(dirname "$DATA_DIR")")
echo "reconstruct $FILENAME under dataset $dataset from folder $DATA_DIR ..."

if (( step1 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of <token>" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --learned_embeds_path "${DATA_DIR}/learned_embeds.bin" \
        --workspace "out/magic123-2d/magic123-2d-${RUN_ID}-coarse/${dataset}/magic123_2d_${FILENAME}_${RUN_ID}_coarse" \
        --optim adam \
        --iters 5000 \
        --guidance SD \
        --lambda_guidance 1 \
        --guidance_scale 100 \
        --latent_iter_ratio 0 \
        --normal_iter_ratio 0.2 \
        --t_range 0.2 0.6 \
        --bg_radius -1 \
        --save_mesh \
        "${@:8}"
fi

if (( step2 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of <token>" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --learned_embeds_path "${DATA_DIR}/learned_embeds.bin" \
        --workspace "out/magic123-2d/magic123-2d-${RUN_ID}-${RUN_ID2}/${dataset}/magic123_2d_${FILENAME}_${RUN_ID}_${RUN_ID2}" \
        --dmtet --init_ckpt "out/magic123-2d/magic123-2d-${RUN_ID}-coarse/${dataset}/magic123_2d_${FILENAME}_${RUN_ID}_coarse/checkpoints/magic123_2d_${FILENAME}_${RUN_ID}_coarse.pth" \
        --iters 5000 \
        --optim adam \
        --latent_iter_ratio 0 \
        --guidance SD \
        --lambda_guidance 1e-3 \
        --guidance_scale 100 \
        --rm_edge \
        --bg_radius -1 \
        --save_mesh
fi

View File

@@ -0,0 +1,73 @@
#!/bin/bash
#SBATCH -N 1
#SBATCH --array=0
#SBATCH -J magic123
#SBATCH -o slurm_logs/%x.%3a.%A.out
#SBATCH -e slurm_logs/%x.%3a.%A.err
#SBATCH --time=3:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --cpus-per-gpu=6
#SBATCH --mem=30G
##SBATCH --gpus=1

# Two-stage Magic123 demo on the ironman example using only the 2D prior,
# without textual inversion (a plain text prompt instead of <token>).
# Usage: <script> <gpu_id> <runid> <runid2> <step1> <step2> [extra main.py args...]

module load gcc/7.5.0
#source ~/.bashrc
#source activate magic123
source venv_magic123/bin/activate
which python
nvidia-smi
nvcc --version
hostname
NUM_GPU_AVAILABLE=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
echo "number of gpus:" "$NUM_GPU_AVAILABLE"

RUN_ID=$2   # job name for the first (coarse) stage
RUN_ID2=$3  # job name for the second (dmtet) stage
DATA_DIR="data/demo/ironman"
IMAGE_NAME="rgba.png"
step1=$4    # non-zero -> run stage 1
step2=$5    # non-zero -> run stage 2
FILENAME=$(basename "$DATA_DIR")
dataset=$(basename "$(dirname "$DATA_DIR")")
echo "reconstruct $FILENAME under dataset $dataset from folder $DATA_DIR ..."

if (( step1 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of a full body ironman" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --workspace "out/magic123-2d-noinv/magic123-2d-noinv-${RUN_ID}-coarse/${dataset}/magic123-2d-noinv_${FILENAME}_${RUN_ID}_coarse" \
        --optim adam \
        --iters 5000 \
        --guidance SD \
        --lambda_guidance 1 \
        --guidance_scale 100 \
        --latent_iter_ratio 0 \
        --normal_iter_ratio 0.2 \
        --t_range 0.2 0.6 \
        --bg_radius -1 \
        --save_mesh \
        "${@:6}"
fi

if (( step2 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of a full body ironman" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --workspace "out/magic123-2d-noinv/magic123-2d-noinv-${RUN_ID}-${RUN_ID2}/${dataset}/magic123-2d-noinv_${FILENAME}_${RUN_ID}_${RUN_ID2}" \
        --dmtet --init_ckpt "out/magic123-2d-noinv/magic123-2d-noinv-${RUN_ID}-coarse/${dataset}/magic123-2d-noinv_${FILENAME}_${RUN_ID}_coarse/checkpoints/magic123-2d-noinv_${FILENAME}_${RUN_ID}_coarse.pth" \
        --iters 5000 \
        --optim adam \
        --latent_iter_ratio 0 \
        --guidance SD \
        --lambda_guidance 1e-3 \
        --guidance_scale 100 \
        --rm_edge \
        --bg_radius -1 \
        --save_mesh
fi

73
scripts/magic123/run_3dprior.sh Executable file
View File

@@ -0,0 +1,73 @@
#!/bin/bash
#SBATCH -N 1
#SBATCH --array=0
#SBATCH -J magic123
#SBATCH -o slurm_logs/%x.%3a.%A.out
#SBATCH -e slurm_logs/%x.%3a.%A.err
#SBATCH --time=3:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --cpus-per-gpu=6
#SBATCH --mem=30G
##SBATCH --gpus=1

# Two-stage Magic123 reconstruction using only the 3D (zero123) prior.
# Usage: run_3dprior.sh <gpu_id> <runid> <runid2> <data_dir> <image_name> <step1> <step2> [extra main.py args...]

module load gcc/7.5.0
#source ~/.bashrc
#source activate magic123
source venv_magic123/bin/activate
which python
nvidia-smi
nvcc --version
hostname
NUM_GPU_AVAILABLE=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
echo "number of gpus:" "$NUM_GPU_AVAILABLE"

RUN_ID=$2      # job name for the first (coarse) stage
RUN_ID2=$3     # job name for the second (dmtet) stage
DATA_DIR=$4    # directory containing the input image
IMAGE_NAME=$5  # image file name inside DATA_DIR
step1=$6       # non-zero -> run stage 1
step2=$7       # non-zero -> run stage 2
FILENAME=$(basename "$DATA_DIR")
dataset=$(basename "$(dirname "$DATA_DIR")")
echo "reconstruct $FILENAME under dataset $dataset from folder $DATA_DIR ..."

if (( step1 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --workspace "out/magic123-3d/magic123-3d-${RUN_ID}/${dataset}/magic123-3d_${FILENAME}_${RUN_ID}_5k" \
        --optim adam \
        --iters 5000 \
        --guidance zero123 \
        --lambda_guidance 40 \
        --guidance_scale 5 \
        --latent_iter_ratio 0 \
        --normal_iter_ratio 0.2 \
        --t_range 0.2 0.6 \
        --bg_radius -1 \
        --save_mesh \
        "${@:8}"
fi

if (( step2 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --workspace "out/magic123-3d/magic123-3d-${RUN_ID}-${RUN_ID2}/${dataset}/magic123-3d_${FILENAME}_${RUN_ID}_${RUN_ID2}" \
        --dmtet --init_ckpt "out/magic123-3d/magic123-3d-${RUN_ID}/${dataset}/magic123-3d_${FILENAME}_${RUN_ID}_5k/checkpoints/magic123-3d_${FILENAME}_${RUN_ID}_5k.pth" \
        --iters 5000 \
        --optim adam \
        --latent_iter_ratio 0 \
        --guidance zero123 \
        --lambda_guidance 0.01 \
        --guidance_scale 5 \
        --bg_radius -1 \
        --rm_edge \
        --save_mesh
fi

View File

@@ -0,0 +1,76 @@
#!/bin/bash
#SBATCH -N 1
#SBATCH --array=0
#SBATCH -J magic123
#SBATCH -o slurm_logs/%x.%3a.%A.out
#SBATCH -e slurm_logs/%x.%3a.%A.err
#SBATCH --time=3:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --cpus-per-gpu=6
#SBATCH --mem=30G
##SBATCH --gpus=1

# Two-stage Magic123 reconstruction with BOTH priors (SD + zero123).
# Usage: run_both_priors.sh <gpu_id> <runid> <runid2> <data_dir> <image_name> <step1> <step2> [extra main.py args...]

module load gcc/7.5.0
#source ~/.bashrc
#source activate magic123
source venv_magic123/bin/activate
which python
nvidia-smi
nvcc --version
hostname
NUM_GPU_AVAILABLE=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
echo "number of gpus:" "$NUM_GPU_AVAILABLE"

RUN_ID=$2      # jobname for the first stage
RUN_ID2=$3     # jobname for the second stage
DATA_DIR=$4    # path to the directory containing the images, e.g. data/nerf4/chair
IMAGE_NAME=$5  # name of the image file, e.g. rgba.png
step1=$6       # whether to run the first stage
step2=$7       # whether to run the second stage
FILENAME=$(basename "$DATA_DIR")
dataset=$(basename "$(dirname "$DATA_DIR")")
echo "reconstruct $FILENAME under dataset $dataset from folder $DATA_DIR ..."

if (( step1 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of <token>" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --learned_embeds_path "${DATA_DIR}/learned_embeds.bin" \
        --workspace "out/magic123-${RUN_ID}-coarse/${dataset}/magic123_${FILENAME}_${RUN_ID}_coarse" \
        --optim adam \
        --iters 5000 \
        --guidance SD zero123 \
        --lambda_guidance 1.0 40 \
        --guidance_scale 100 5 \
        --latent_iter_ratio 0 \
        --normal_iter_ratio 0.2 \
        --t_range 0.2 0.6 \
        --bg_radius -1 \
        --save_mesh \
        "${@:8}"
fi

if (( step2 )); then
    CUDA_VISIBLE_DEVICES=$1 python main.py -O \
        --text "A high-resolution DSLR image of <token>" \
        --sd_version 1.5 \
        --image "${DATA_DIR}/${IMAGE_NAME}" \
        --learned_embeds_path "${DATA_DIR}/learned_embeds.bin" \
        --workspace "out/magic123-${RUN_ID}-${RUN_ID2}/${dataset}/magic123_${FILENAME}_${RUN_ID}_${RUN_ID2}" \
        --dmtet --init_ckpt "out/magic123-${RUN_ID}-coarse/${dataset}/magic123_${FILENAME}_${RUN_ID}_coarse/checkpoints/magic123_${FILENAME}_${RUN_ID}_coarse.pth" \
        --iters 5000 \
        --optim adam \
        --latent_iter_ratio 0 \
        --guidance SD zero123 \
        --lambda_guidance 1e-3 0.01 \
        --guidance_scale 100 5 \
        --rm_edge \
        --bg_radius -1 \
        --save_mesh
fi

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Run the two-stage Magic123 pipeline (both priors) on every example under <topdir>.
# Usage: <script> <device> <runid> <runid2> <topdir> <imagename> <step1> <step2> [extra args...]
device=$1
runid=$2    # jobname for the first stage
runid2=$3   # jobname for the second stage
topdir=$4   # path to the directory containing the images, e.g. data/nerf4
imagename=$5
step1=$6
step2=$7
for i in "$topdir"/*; do
    echo "$i"
    [ -d "$i" ] && echo "$i exists."
    # Pass runid2 so run_both_priors.sh's positional arguments line up:
    # it expects <device> <runid> <runid2> <data_dir> <imagename> <step1> <step2>.
    # (The original omitted runid2, shifting every later argument by one.)
    bash scripts/magic123/run_both_priors.sh "$device" "$runid" "$runid2" "$i" "$imagename" "$step1" "$step2" "${@:8}"
done

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Run the two-stage Magic123 pipeline (both priors) on a fixed example list.
# Usage: <script> <device> <runid> <runid2> <imagename> <step1> <step2> [extra args...]
device=$1
runid=$2    # jobname for the first stage
runid2=$3   # jobname for the second stage
imagename=$4
step1=$5
step2=$6
examples=(
    'data/realfusion15/teddy_bear/'
    'data/realfusion15/mental_dragon_statue/'
    'data/realfusion15/colorful_teapot/'
    'data/realfusion15/fish_real_nemo/'
    'data/realfusion15/two_cherries'
    'data/realfusion15/watercolor_horse/'
    'data/nerf4/chair'
    'data/nerf4/drums'
    'data/nerf4/ficus'
    'data/nerf4/mic'
)
for i in "${examples[@]}"; do
    echo "$i"
    [ -d "$i" ] && echo "$i exists."
    # Pass runid2 so run_both_priors.sh's positional arguments line up
    # (the original omitted it, shifting every later argument by one).
    bash scripts/magic123/run_both_priors.sh "$device" "$runid" "$runid2" "$i" "$imagename" "$step1" "$step2" "${@:7}"
done

21
scripts/snap/aws_folder.sh Executable file
View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Submit one cluster job per example sub-directory of <topdir>.
# Usage: aws_folder.sh <script_name> <runid> <runid2> <topdir> <imagename> <run1> <run2> [extra args...]
script_name=$1
runid=$2
runid2=$3
topdir=$4
imagename=$5
run1=$6
run2=$7
arguments="${@:8}"   # forwarded verbatim inside the --command string
timestamp=$(date +'%Y%m%d')
for i in "$topdir"/*; do
    echo "$i"
    [ -d "$i" ] && echo "$i exists."
    example=$(basename "$i")
    echo "${@:8}"
    python scripts/snap/submit_cluster_job.py --yaml_folder scripts/snap/yamls \
        --gpu_memory 40 --gpu_num 1 --force_node --cpu_num_per_gpu 6 --memory_per_gpu 30.0 --replicas 1 \
        --project_name magic123 --project_support_alias img2mesh \
        --job_name "gqian-$timestamp-$runid-$runid2-$example" \
        --command "bash $script_name 0 $runid $runid2 $i $imagename $run1 $run2 $arguments "
done

27
scripts/snap/aws_list.sh Executable file
View File

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Submit one cluster job per example in a fixed list.
# Usage: aws_list.sh <script_name> <runid> <runid2> <imagename> <run1> <run2> [extra args...]
script_name=$1
runid=$2
runid2=$3
imagename=$4
run1=$5
run2=$6
arguments="${@:7}"   # forwarded verbatim inside the --command string
examples=(
    'data/realfusion15/two_donuts/'
    'data/realfusion15/watercolor_horse/'
)
timestamp=$(date +'%Y%m%d')
for i in "${examples[@]}"; do
    echo "$i"
    [ -d "$i" ] && echo "$i exists."
    example=$(basename "$i")
    # Echo the same slice that is forwarded (the original echoed ${@:8},
    # one position past where the extra arguments actually start).
    echo "${@:7}"
    python scripts/snap/submit_cluster_job.py --yaml_folder scripts/snap/yamls \
        --gpu_memory 40 --gpu_num 1 --force_node --cpu_num_per_gpu 6 --memory_per_gpu 30.0 --replicas 1 \
        --project_name magic123 --project_support_alias img2mesh \
        --job_name "gqian-$timestamp-$runid-$runid2-$example" \
        --command "bash $script_name 0 $runid $runid2 $i $imagename $run1 $run2 $arguments "
done

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Submit a long-lived debug pod (sleeps forever so you can exec into it).
# Usage: <script> <jobname>
jobname=$1
timestamp=$(date +'%Y%m%d')
# (Removed a leftover '[ -d "$i" ]' check: $i was never defined in this script.)
python scripts/snap/submit_cluster_job.py --yaml_folder scripts/snap/yamls \
    --gpu_memory 40 --gpu_num 1 --force_node --cpu_num_per_gpu 6 --memory_per_gpu 30.0 --replicas 1 \
    --project_name magic123 --project_support_alias img2mesh \
    --job_name "gqian-$timestamp-$jobname" \
    --command "while :; do sleep 1000; done"

View File

@@ -0,0 +1,99 @@
import yaml
import os
import argparse
def generate_yaml(
    gpu_memory=40, # 40G or 80G?
    gpu_num=1,
    cpu_num_per_gpu=6,
    memory_per_gpu=30,
    replicas=1,
    project_name='magic123',
    project_support_alias='img2mesh',
    pre_run_event='mkdir -p /fsx/code && ln -s /nfs/code/gqian /fsx/code/ && cd /fsx/code/gqian/img2mesh',
    command="while :; do sleep 1000; done",
    job_name='debug',
    force_node=False,
    **kwargs
):
    """Build the job-spec dictionary for an AWS PyTorch cluster job.

    CPU and memory totals are scaled by ``gpu_num`` and serialized as
    strings, matching the schema the submission tool expects. Extra
    keyword arguments (e.g. unknown argparse options) are ignored.

    Returns:
        dict: the complete job spec, ready to be dumped to YAML.
    """
    spec = {
        'docker_image': '440036398022.dkr.ecr.us-west-2.amazonaws.com/facecraft-ml:efa',
        'project_name': project_name,
        'project_support_alias': project_support_alias,
        'team': 'creative_vision',
        #'fsx': 'fs-0b933bba2f17fe699', # 100T genai filesystem
        'fsx': 'fs-056caaa56fa5cc5f3', # 2T personal filesystem of gqian
        'gpu_type': 'nvidia-tesla-a100',
        'gpu_num': gpu_num,
        'cpu_num': str(int(cpu_num_per_gpu * gpu_num)),
        'memory': str(int(memory_per_gpu * gpu_num)),
        'gpu_memory': str(int(gpu_memory)),
        'pytorchjob': {'replicas': replicas},
        'efa': True,
        'script': {
            'pre_run_event': str(pre_run_event),
            'command': str(command),
            'jobs': [{'name': job_name}],
        },
    }
    # Single-GPU jobs default to the debug pool unless force_node is set.
    if gpu_num == 1 and not force_node:
        node_labels = {'use_case': 'p4d_debug'}
    else:
        node_labels = {
            'snap.com/spine': 'unknown',
            'snap.com/region': 'us-west-2c',
        }
    spec['custom_node_labels'] = node_labels
    return spec
if __name__ == "__main__":
    # CLI wrapper: build a job spec from flags, write it as YAML, submit it
    # to the cluster, then show the pod status and (best-effort) its logs.
    parser = argparse.ArgumentParser(description="Script to generate yaml file for AWS Pytorch Job")
    parser.add_argument('--yaml_folder', type=str, default='./', help='path to save the yaml folder')
    parser.add_argument('--gpu_memory', type=int, default=40, help='GPU memory (in GB)')
    parser.add_argument('--gpu_num', type=int, default=1, help='Number of GPUs')
    parser.add_argument('--cpu_num_per_gpu', type=int, default=6, help='Number of CPUs per GPU')
    parser.add_argument('--memory_per_gpu', type=float, default=30.0, help='Memory per GPU')
    parser.add_argument('--replicas', type=int, default=1, help='Number of replicas')
    parser.add_argument('--project_name', type=str, default='magic123', help='Project name')
    parser.add_argument('--project_support_alias', type=str, default='img2mesh', help='Project support alias')
    #parser.add_argument('--pre_run_event', type=str, default='export PATH=/nfs/code/gqian/miniconda3/bin:$PATH && conda init bash && source ~/.bashrc && cd /nfs/code/gqian/img2mesh ', help='Pre-run event command')
    parser.add_argument('--pre_run_event', type=str, default='cd /nfs/code/gqian/img2mesh', help='Pre-run event command')
    parser.add_argument('--command', type=str, default='while :; do sleep 1000; done', help='Command')
    parser.add_argument('--job_name', type=str, default='debug', help='Job name')
    parser.add_argument('--force_node', action='store_true',
                        help="use normal cluster not debug cluster")
    # parse_known_args: tolerate unrecognized flags so callers can forward
    # whole argument lists without this wrapper rejecting them.
    args, unknown = parser.parse_known_args()
    args.job_name = args.job_name.replace('_', '-').replace('.', '-')[:51] # do not support _ and . in job name, and max length is limited (around 70)
    # "bash scripts/magic123/run_single_bothpriors.sh 0 r256 data/nerf4/drums rgba.png --h 300 --w 300"
    data = generate_yaml(**vars(args))
    yaml_str = yaml.safe_dump(data)
    # Write the YAML content to a file
    os.makedirs(args.yaml_folder, exist_ok=True)
    yaml_path = os.path.join(args.yaml_folder, f'{args.job_name}.yaml')
    with open(yaml_path, 'w') as file:
        file.write(yaml_str)
    print(f'YAML file saved to {yaml_path}')
    # launch the job using snap_rutls ('yes yes |' auto-confirms the prompt)
    os.system(f'yes yes | snap_rutils cluster run {yaml_path} -s')
    # show the job status
    os.system(f'kubectl get pods | grep {args.job_name} ')
    # show the job logs
    # NOTE(review): this can race the pod startup, so logs may be empty/missing
    os.system(f'kubectl logs {args.job_name}-worker-0')

View File

@@ -0,0 +1,18 @@
script_name=$1
runid=$2
runid2=$3
i=$4
imagename=$5
run1=$6
run2=$7
arguments="${@:8}"
timestamp=$(date +'%Y%m%d')
[ -d "$i" ] && echo "$i exists."
example=$(basename $i)
echo ${@:8}
python scripts/snap/submit_cluster_job.py --yaml_folder scripts/snap/yamls \
--gpu_memory 40 --gpu_num 1 --force_node --cpu_num_per_gpu 6 --memory_per_gpu 30.0 --replicas 1 \
--project_name magic123 --project_support_alias img2mesh \
--job_name gqian-$timestamp-$runid-$runid2-$example \
--command "bash $script_name 0 $runid $runid2 $i $imagename $run1 $run2 $arguments "

View File

@@ -0,0 +1,52 @@
#!/bin/bash
#SBATCH -N 1
#SBATCH --array=0
#SBATCH -J dreamfusion
#SBATCH -o slurm_logs/%x.%3a.%A.out
#SBATCH -e slurm_logs/%x.%3a.%A.err
#SBATCH --time=9:00:00
#SBATCH --gres=gpu:v100:1
#SBATCH --cpus-per-gpu=6
#SBATCH --mem=30G
##SBATCH --gpus=1

# Train a textual-inversion token on one image set, then sanity-check the
# learned embedding with a test render via guidance/sd_utils.py.
# Usage: textural_inversion.sh <gpu> <model> <data_dir> <out_dir> <placeholder_token> <init_token> [extra args...]

module load gcc/7.5.0
echo "===> Anaconda env loaded"
#source ~/.bashrc
#source activate magic123
source venv_magic123/bin/activate
nvidia-smi
nvcc --version
hostname
NUM_GPU_AVAILABLE=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
echo "number of gpus:" "$NUM_GPU_AVAILABLE"

MODEL_NAME=$2         # "path-to-pretrained-model", e.g. runwayml/stable-diffusion-v1-5
DATA_DIR=$3           # "path-to-dir-containing-your-image"
OUTPUT_DIR=$4         # "path-to-desired-output-dir"
placeholder_token=$5  # e.g. _ironman_
init_token=$6         # e.g. ironman

# run textual inversion
CUDA_VISIBLE_DEVICES=$1 python textual-inversion/textual_inversion.py \
    --pretrained_model_name_or_path="$MODEL_NAME" \
    --train_data_dir="$DATA_DIR" \
    --learnable_property="object" \
    --placeholder_token="$placeholder_token" \
    --initializer_token="$init_token" \
    --resolution=512 \
    --train_batch_size=16 \
    --gradient_accumulation_steps=1 \
    --max_train_steps=3000 \
    --lr_scheduler="constant" \
    --lr_warmup_steps=0 \
    --output_dir="$OUTPUT_DIR" \
    --use_augmentations \
    "${@:7}"

# test textual inversion
CUDA_VISIBLE_DEVICES=$1 python guidance/sd_utils.py --text "A high-resolution DSLR image of <token>" --learned_embeds_path "$OUTPUT_DIR" --workspace "$OUTPUT_DIR"

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Run textual inversion on each NeRF-synthetic example.
examples=(
    'data/nerf4/chair'
    'data/nerf4/drums'
    'data/nerf4/ficus'
    'data/nerf4/mic'
)
for i in "${examples[@]}"; do
    filename=$(basename "$i")
    # NOTE(review): the original line contained "$(unknown)" placeholders
    # (likely redacted/garbled). Reconstructed from textural_inversion.sh's
    # positional signature <gpu> <model> <data> <out_dir> <placeholder_token>
    # <init_token>: output dir keyed by example name, placeholder token
    # _nerf_<name>_, initializer token <name>. TODO confirm against the
    # original intent before relying on the trained embeddings.
    bash scripts/texural_inversion/textural_inversion.sh 0 runwayml/stable-diffusion-v1-5 "$i"/rgba.png \
        "out/texural_inversion/$filename" "_nerf_${filename}_" "$filename" --max_train_steps 3000
done