Commit 3967df0

temp

root committed Feb 2, 2024
1 parent 638415e commit 3967df0
Showing 17 changed files with 687 additions and 132 deletions.
6 changes: 2 additions & 4 deletions README.md
@@ -37,11 +37,9 @@ We recommend using anaconda to manage the python environments.
conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 pytorch-cuda=11.8 -c pytorch -c nvidia
pip install submodules/diff-gaussian-rasterization
pip install submodules/simple-knn
pip install --upgrade https://github.com/unlimblue/KNN_CUDA/releases/download/0.2/KNN_CUDA-0.2-py3-none-any.whl
pip install --upgrade https://github.com/unlimblue/KNN_CUDA/releases/download/0.2/KNN_CUDA-0.2-py3-none-any.whl -i https://pypi.tuna.tsinghua.edu.cn/simple

pip install -r requirement.txt
pip install -r requirement.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
```

Tips: We implement the [alpha mask loss version](https://github.com/ashawkey/diff-gaussian-rasterization) based on the official [diff-gaussian-rasterization](https://github.com/graphdeco-inria/diff-gaussian-rasterization/tree/59f5f77e3ddbac3ed9db93ec2cfe99ed6c5d121d).
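If the environment is built as above, the CUDA extensions should import cleanly. A minimal, hypothetical sanity check — `diff_gaussian_rasterization` is the import name used later in this commit, while `knn_cuda` is how the KNN_CUDA wheel normally exposes itself, not something this repo documents:

```python
# Hypothetical environment check; import names assumed from the wheels installed above.
import torch
print(torch.__version__, torch.cuda.is_available())   # expect 2.0.0 and True

from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from knn_cuda import KNN                               # from the KNN_CUDA wheel

knn = KNN(k=5, transpose_mode=True)
ref = torch.rand(1, 6890, 3).cuda()                    # e.g. SMPL vertex count
query = torch.rand(1, 100, 3).cuda()
dist, idx = knn(ref, query)                            # both (1, 100, 5)
print(dist.shape, idx.shape)
```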
2 changes: 0 additions & 2 deletions arguments/__init__.py
@@ -90,8 +90,6 @@ def __init__(self, parser):
self.densification_interval = 100
self.opacity_reset_interval = 4000
self.densify_from_iter = 400 #500
# self.densify_until_iter = 1000 #15_000
# self.densify_until_iter = 1500 #15_000
self.densify_until_iter = 2000 #15_000
self.densify_grad_threshold = 0.0002
super().__init__(parser, "Optimization Parameters")
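For context, these knobs drive the standard 3DGS densification schedule (note `densify_until_iter` cut from the upstream 15_000 to 2000). A sketch of how such a trainer typically consumes them — the method names on `gaussians` follow the public gaussian-splatting trainer and are assumptions here, hence the stub:

```python
# Sketch of the usual 3DGS densification schedule, driven by the parameters above.
class GaussiansStub:                      # stand-in so the sketch runs; the real repo has the model
    def densify_and_prune(self, grad_threshold, min_opacity, extent):
        print("densify + prune")
    def reset_opacity(self):
        print("reset opacity")

class Opt:                                # mirrors the OptimizationParams fields above
    densification_interval = 100
    opacity_reset_interval = 4000
    densify_from_iter = 400
    densify_until_iter = 2000
    densify_grad_threshold = 0.0002

def densification_step(iteration, opt, gaussians, scene_extent=1.0):
    if iteration < opt.densify_until_iter:
        if iteration > opt.densify_from_iter and iteration % opt.densification_interval == 0:
            gaussians.densify_and_prune(opt.densify_grad_threshold, 0.005, scene_extent)
        if iteration % opt.opacity_reset_interval == 0:
            gaussians.reset_opacity()     # re-spread opacities so pruning stays effective

for it in range(1, 2001):
    densification_step(it, Opt(), GaussiansStub())
```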
20 changes: 13 additions & 7 deletions gaussian_renderer/__init__.py
@@ -16,8 +16,7 @@
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from scene.gaussian_model import GaussianModel
from utils.sh_utils import eval_sh


from smplx.lbs import batch_rodrigues

def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None, return_smpl_rot=False, transforms=None, translation=None):
"""
@@ -68,24 +67,32 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
# Ours
pose_out = pc.auto_regression(viewpoint_camera.smpl_param['poses'],) # torch.Size([1, 72])
pose_out['target_R'] = viewpoint_camera.smpl_param['pose_rotmats']
correct_Rs = pose_out['Rs'].reshape(1,23,3,3)

pose_ = viewpoint_camera.smpl_param['poses']
ident = torch.eye(3).cuda().float()
batch_size = pose_.shape[0]
rot_mats = batch_rodrigues(pose_.view(-1, 3)).view([batch_size, -1, 3, 3])
rot_mats_no_root = rot_mats[:, 1:]
correct_Rs = torch.matmul(rot_mats_no_root.reshape(-1, 3, 3), pose_out['Rs'].reshape(-1, 3, 3)).reshape(-1, 23, 3, 3)

lbs_weights = pc.cross_attention_lbs(means3D[None],correct_Rs)
correct_Rs = pose_out['Rs'].reshape(1,23,3,3)

# Baseline
# pose_out = pc.pose_decoder(viewpoint_camera.smpl_param['poses'][:, 3:])
# correct_Rs = pose_out['Rs']

# lbs_weights = pc.weight_offset_decoder(means3D[None].detach()) # torch.Size([1, 6890, 3])
# lbs_weights = lbs_weights.permute(0,2,1) # torch.Size([1, 6890, 24])

# transform points
# torch.Size([1, 6890, 3])

_, means3D, bweights, transforms, translation = pc.coarse_deform_c2source(means3D[None], viewpoint_camera.smpl_param,viewpoint_camera.big_pose_smpl_param,viewpoint_camera.big_pose_world_vertex[None], lbs_weights=lbs_weights, correct_Rs=correct_Rs, return_transl=return_smpl_rot)
else:
bweights = None
correct_Rs = None
lbs_weights = None
means3D = torch.matmul(transforms, means3D[..., None]).squeeze(-1) + translation

means3D = means3D.squeeze() # torch.Size([6890, 3])
@@ -145,5 +152,4 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
"pose_out":pose_out,
# "lbs_weights":lbs_weights,
"lbs_weights":bweights,
"means3D":means3D}

"means3D":means3D}
145 changes: 81 additions & 64 deletions get_result.ipynb
@@ -177,88 +177,105 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3\n",
"4\n",
"==========\n",
"/HOME/HOME/Caixiang/GauHuman/result/left_rot_w_o_knn.txt\n",
"[3200. 31.88573173 0.97478237 17.45978015]\n",
"[3200. 33.49919013 0.96853075 26.87959641]\n",
"[2700. 28.1697915 0.95575932 35.7266138 ]\n",
"[2700. 32.01727524 0.9658459 29.14347065]\n",
"[2500. 30.14890172 0.95781126 33.12674873]\n",
"[2700. 31.33987362 0.9601679 29.0177802 ]\n",
"[2833.33333333 31.17679399 0.96381625 28.55899832]\n",
"==========\n",
"/HOME/HOME/Caixiang/GauHuman/result/knn5_threshold004.txt\n",
"[3000. 31.86517051 0.97465325 17.64667291]\n",
"[3200. 33.48071525 0.96847608 27.28678166]\n",
"[2700. 28.17982278 0.95589501 35.81094667]\n",
"[2200. 32.10495282 0.96610117 29.22210581]\n",
"[2500. 30.15789562 0.95777659 33.26970976]\n",
"[2500. 31.34371296 0.96013558 29.06167956]\n",
"[2683.33333333 31.18871166 0.96383961 28.71631606]\n",
"/HOME/HOME/Caixiang/GauHuman/result/left_rot_new_gau_kl.txt\n",
"[3200. 31.87740911 0.9745842 17.67244392]\n",
"[3200. 33.53834264 0.96861405 27.19227238]\n",
"[2700. 28.21934604 0.95595338 35.85444013]\n",
"[3400. 31.98790298 0.96575824 29.42002674]\n",
"[2500. 30.18010379 0.95780448 33.49907035]\n",
"[2500. 31.33897374 0.96000669 29.27866777]\n",
"[3000. 31.87116905 0.97462126 17.64321321]\n",
"[3200. 33.52655777 0.96855634 27.28679506]\n",
"[2700. 28.22548392 0.95600264 35.85649823]\n",
"[3400. 32.0024897 0.9657542 29.47331649]\n",
"[2500. 30.16266497 0.95770316 33.41179252]\n",
"[3400. 31.29412656 0.9598087 29.22097343]\n",
"[2975. 31.18538086 0.96376394 28.81745918]\n",
"==========\n",
"/HOME/HOME/Caixiang/GauHuman/result/knn5_threshold0055.txt\n",
"[3000. 31.85109 0.97467119 17.30288314]\n",
"[3200. 33.50715609 0.96857918 27.0390654 ]\n",
"[2700. 28.17620545 0.95581187 35.70530461]\n",
"[2700. 31.99905494 0.96579295 29.22874703]\n",
"[2500. 30.14880453 0.95777959 33.2863691 ]\n",
"[3400. 31.29782269 0.95997602 29.05821454]\n",
"[2916.66666667 31.16335562 0.96376847 28.60343064]\n",
"/HOME/HOME/Caixiang/GauHuman/result/left_rot.txt\n",
"[3000. 31.83522161 0.97458865 17.41441259]\n",
"[3200. 33.50095347 0.96847809 26.83169325]\n",
"[2700. 28.1889178 0.95591124 35.69853477]\n",
"[3200. 32.09020775 0.9660003 29.22096264]\n",
"[3000. 30.14676305 0.95757332 33.16421561]\n",
"[2500. 31.3268466 0.96013386 29.05340674]\n",
"[2933.33333333 31.18148505 0.96378091 28.56387094]\n",
"==========\n",
"/HOME/HOME/Caixiang/GauHuman/result/knn5_threshold0025.txt\n",
"[3000. 31.82804017 0.97447005 18.84031826]\n",
"[3200. 33.49542318 0.96837144 27.92167135]\n",
"[2700. 28.22488061 0.95600595 36.55952475]\n",
"[3400. 31.9911635 0.96580783 29.77656518]\n",
"[2500. 30.18759104 0.95781264 33.43688359]\n",
"[2700. 31.3399112 0.96016819 29.26617401]\n",
"[2916.66666667 31.17783495 0.96377268 29.30018952]\n"
"/HOME/HOME/Caixiang/GauHuman/result/left_rot_new.txt\n",
"[3000. 31.83662219 0.97459412 17.40796776]\n",
"[3200. 33.50484803 0.96855212 26.90188221]\n",
"[2700. 28.18612233 0.95591996 35.45384348]\n",
"[3000. 31.99535259 0.96571304 29.17493 ]\n",
"[2500. 30.1324881 0.9576707 33.16420816]\n",
"[2500. 31.30890902 0.96009192 28.94997299]\n",
"[2816.66666667 31.16072371 0.96375698 28.50880077]\n"
]
}
],
"source": [
"txt_list = glob('/HOME/HOME/Caixiang/GauHuman/result/knn5_*.txt')\n",
"# txt_list = glob('/HOME/HOME/Caixiang/GauHuman/result/*.txt')\n",
"txt_list = glob('/HOME/HOME/Caixiang/GauHuman/result/left_ro*.txt')\n",
"print(len(txt_list))\n",
"\n",
"info = True\n",
"for txt_path in txt_list[:]:\n",
" print(\"==========\")\n",
" print(txt_path)\n",
" # try:\n",
" method_max = []\n",
" file = open(txt_path, 'r')\n",
" data = file.read()\n",
" data = [i for i in data.split('\\n')]\n",
" metrics_np = []\n",
" data_max = []\n",
" for i in data:\n",
" if len(i)<=1:continue\n",
" if len(i)<9:\n",
" if len(metrics_np)>0:\n",
" data = np.array(metrics_np)\n",
" row_with_max_last_column = data[data[:, -1].argmin()]\n",
" if info:\n",
" print(row_with_max_last_column)\n",
" data_max.append(row_with_max_last_column)\n",
" metrics_np = []\n",
" else:\n",
" metrics = i.split(' ')\n",
" for i in metrics:\n",
" if len(i)<2:metrics.remove(i)\n",
" metrics = [float(i) for i in metrics]\n",
" metrics_np.append(metrics)\n",
"for txt_path in txt_list:\n",
" try:\n",
" print(\"==========\")\n",
" print(txt_path)\n",
" # try:\n",
" method_max = []\n",
" file = open(txt_path, 'r')\n",
" data = file.read()\n",
" data = [i for i in data.split('\\n')]\n",
" metrics_np = []\n",
" data_max = []\n",
" for i in data:\n",
" if len(i)<=1:continue\n",
" if len(i)<9:\n",
" if len(metrics_np)>0:\n",
" data = np.array(metrics_np)\n",
" row_with_max_last_column = data[data[:, -1].argmin()]\n",
" if info:\n",
" print(row_with_max_last_column)\n",
" data_max.append(row_with_max_last_column)\n",
" metrics_np = []\n",
" else:\n",
" metrics = i.split(' ')\n",
" for i in metrics:\n",
" if len(i)<2:metrics.remove(i)\n",
" metrics = [float(i) for i in metrics]\n",
" metrics_np.append(metrics)\n",
"\n",
" data = np.array(metrics_np)\n",
" row_with_max_last_column = data[data[:, -1].argmin()]\n",
" if info:\n",
" print(row_with_max_last_column)\n",
" data_max.append(row_with_max_last_column)\n",
" data = np.array(metrics_np)\n",
" row_with_max_last_column = data[data[:, -1].argmin()]\n",
" if info:\n",
" print(row_with_max_last_column)\n",
" data_max.append(row_with_max_last_column)\n",
"\n",
" data_max = np.array(data_max)\n",
" max = np.sum(data_max,axis=0)/data_max.shape[0]\n",
" method_max.append(max)\n",
" print(max)\n",
" # except:\n",
" # print('jump over ',txt_path)\n"
" data_max = np.array(data_max)\n",
" max = np.sum(data_max,axis=0)/data_max.shape[0]\n",
" method_max.append(max)\n",
" print(max)\n",
" except:\n",
" print('jump over ',txt_path)\n"
]
},
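The cell above scans result logs and, per blank-separated block, keeps the row with the smallest last column — the rows read like `[iteration PSNR SSIM LPIPS]`, so the minimum is presumably LPIPS, though that's inferred, not documented. A cleaned-up sketch of the same parsing under that assumed file format; it avoids two pitfalls in the original cell: `metrics.remove(i)` mutates the list being iterated, and `row_with_max_last_column` actually holds an arg-*min*:

```python
import numpy as np
from glob import glob

def best_row(block):
    """Row with the smallest last column (assumed to be LPIPS) in one block."""
    rows = np.array(block)
    return rows[rows[:, -1].argmin()]

def parse_result_file(txt_path):
    best, block = [], []
    with open(txt_path) as f:
        for line in f:
            try:
                row = [float(x) for x in line.split()]   # split() handles runs of spaces
            except ValueError:
                row = []                                 # non-numeric line: treat as separator
            if len(row) >= 2:                            # a metrics row
                block.append(row)
            elif block:                                  # separator closes the current block
                best.append(best_row(block))
                block = []
    if block:                                            # file may not end with a separator
        best.append(best_row(block))
    return np.array(best)

for txt_path in sorted(glob('/HOME/HOME/Caixiang/GauHuman/result/left_ro*.txt')):
    print('==========')
    print(txt_path)
    print(parse_result_file(txt_path).mean(axis=0))      # average of per-scene best rows
```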
88 changes: 85 additions & 3 deletions nets/mlp_delta_body_pose.py
@@ -22,7 +22,6 @@ def __init__(self,device='cuda'):
block_mlps += [nn.Linear(mlp_width, 3 * self.num_joints)]

self.block_mlps = nn.Sequential(*block_mlps)

# init the weights of the last layer as very small value
# -- at the beginning, we hope the rotation matrix can be identity
init_val = 1e-5
@@ -33,8 +32,8 @@ def __init__(self,device='cuda'):


def forward(self,feature):
joint_F = self.block_mlps(feature[:, 3:]).view(-1, 3) # (Joints, 3, 3)
joint_F = self.rodriguez(joint_F)
joint_F = self.block_mlps(feature[:, 3:]).view(-1, 3) #(1,69)->mlp->(1,69)->(23,3)
joint_F = self.rodriguez(joint_F) # (23,3) -> (23,3,3)

joint_U, joint_S, joint_V = torch.svd(joint_F) # (Joints, 3, 3), (Joints, 3), (Joints, 3, 3)

Expand All @@ -46,6 +45,89 @@ def forward(self,feature):
}
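`forward` returns the SVD factors of the predicted rotations alongside `Rs`; the excerpt doesn't show how `pose_U/S/V` are consumed downstream, but the standard uses are an orthogonality regularizer or projecting a raw 3×3 onto SO(3). A sketch of the latter, under that assumption:

```python
import torch

def project_to_SO3(F):
    """Closest proper rotation to each 3x3 in F (N, 3, 3) under the Frobenius norm."""
    U, S, Vh = torch.linalg.svd(F)
    det = torch.det(U @ Vh)                                   # +-1 up to numerics
    D = torch.diag_embed(torch.stack([torch.ones_like(det),
                                      torch.ones_like(det), det], dim=-1))
    return U @ D @ Vh                                         # flips one sign if det was -1

F = torch.eye(3)[None] + 1e-5 * torch.randn(8, 3, 3)          # near-identity, like the small-init output
R = project_to_SO3(F)
print(torch.allclose(R @ R.transpose(-1, -2), torch.eye(3).expand(8, 3, 3), atol=1e-5))  # True
```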


# class Autoregression(nn.Module):
# def __init__(self,device='cuda'):
# super(Autoregression,self).__init__()
# self.device = device
# mlp_depth=2
# self.num_joints = 23
# embedding_size = 69
# # mlp_width = 128+9 * self.num_joints
# mlp_width = 128
# out_dim = 3
# block_mlps = [nn.Linear(embedding_size, mlp_width), nn.ReLU()]
# for _ in range(0, mlp_depth-1):
# block_mlps += [nn.Linear(mlp_width, mlp_width), nn.ReLU()]

# block_mlps += [nn.Linear(mlp_width, 3 * self.num_joints)]

# self.block_mlps = nn.Sequential(*block_mlps)

# # init the weights of the last layer as very small value
# # -- at the beginning, we hope the rotation matrix can be identity
# init_val = 1e-5
# # last_layer = self.block_mlps[-1]
# # last_layer.weight.data.uniform_(-init_val, init_val)
# # last_layer.bias.data.zero_()
# self.rodriguez = RodriguesModule()
# self.fc_pose = []
# self.parents_dict = self.immediate_parent_to_all_ancestors()
# for joint in range(self.num_joints):
# num_parents = len(self.parents_dict[joint])
# input_dim = 3 + num_parents * out_dim #(9 + 3 + 9)
# # fc = nn.Sequential(nn.Linear(input_dim, mlp_width // 2),
# # self.activation,
# # nn.Linear(mlp_width // 2, self.joint_dim))
# # nn.Linear(embed_dim // 2, 3))
# # nn.Linear(embed_dim // 2, 9)))
# fc = nn.Sequential(nn.Linear(input_dim,out_dim))
# fc[-1].weight.data.uniform_(-init_val, init_val)
# fc[-1].bias.data.zero_()
# self.fc_pose.append(fc)
# self.fc_pose = nn.Sequential(*self.fc_pose)

# def immediate_parent_to_all_ancestors(self,immediate_parents=[-1,0,0,0,1,2,3,4,5,6,7,8,9,9,9,12,13,14,16,17,18,19,20,21]):
# """
# :param immediate_parents: list with len = num joints, contains index of each joint's parent.
# - includes root joint, but its parent index is -1.
# :return: ancestors_dict: dict of lists, dict[joint] is ordered list of parent joints.
# - DOES NOT INCLUDE ROOT JOINT! Joint 0 here is actually joint 1 in SMPL.
# """
# ancestors_dict = defaultdict(list)
# for i in range(1, len(immediate_parents)): # Excluding root joint
# joint = i - 1
# immediate_parent = immediate_parents[i] - 1
# if immediate_parent >= 0:
# ancestors_dict[joint] += [immediate_parent] + ancestors_dict[immediate_parent]
# return ancestors_dict

# def forward(self,feature):
# joint_F = self.block_mlps(feature[:, 3:])

# joint_F = joint_F.reshape(1,23,3)
# auto_joint_F = torch.zeros_like(joint_F,device=joint_F.device)

# for joint in range(self.num_joints):
# parents = self.parents_dict[joint]
# fc_joint = self.fc_pose[joint]
# embed = joint_F[:, joint]
# if len(parents) > 0:
# embed = embed.unsqueeze(1)
# parents_embed = joint_F[:, parents]
# auto_joint_F[:, joint] = fc_joint(torch.cat([embed, parents_embed], dim=1).reshape(1,-1))
# else:
# auto_joint_F[:, joint] = fc_joint(embed)

# auto_joint_F = self.rodriguez(auto_joint_F[0])

# joint_U, joint_S, joint_V = torch.svd(auto_joint_F) # (Joints, 3, 3), (Joints, 3), (Joints, 3, 3)

# return {
# "Rs": auto_joint_F,
# "pose_U":joint_U,
# "pose_S":joint_S,
# "pose_V":joint_V,
# }

class Autoregression_autoregression(nn.Module):
def __init__(self,device='cuda'):
6 changes: 3 additions & 3 deletions nets/mlp_delta_weight_lbs.py
@@ -24,6 +24,7 @@ def __init__(self, feature_dim=24,rot_dim = 9, num_heads=3):
self.key = nn.Linear(rot_dim, rot_dim)
self.value = nn.Linear(rot_dim, rot_dim)
self.out_layer = nn.Linear(feature_dim,feature_dim)
self.gate_proj = nn.Linear(feature_dim,feature_dim)

def forward(self, query, key):
features = xyz_embedder(query)
@@ -46,9 +47,8 @@ def forward(self, query, key):

output = torch.matmul(attention,V.transpose(-2, -1))

# output = output*query
# output = self.out_layer(output)

# gate = torch.sigmoid(self.gate_proj(query))
# output = gate * self.out_layer(output)
return output

# class CrossAttention_lbs_without_bias(nn.Module):
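The visible fragments show the pattern: canonical points are embedded as queries, flattened per-joint rotations act as keys/values, and the diff adds a `gate_proj` whose gated output path is left commented out. A self-contained sketch of that pattern — the layer sizes and query embedding are assumptions (the repo's `xyz_embedder` and full forward are not shown), so this illustrates the idea rather than reproducing the module:

```python
import torch
import torch.nn as nn

class CrossAttentionLBS(nn.Module):
    """Sketch: points attend over per-joint rotations to predict skinning weights.

    Layer sizes are assumptions chosen to make the sketch self-consistent;
    the repository's xyz_embedder and exact shapes are not shown in the diff.
    """
    def __init__(self, num_joints=24, rot_dim=9):
        super().__init__()
        self.query = nn.Linear(3, rot_dim)                # stand-in for xyz_embedder + query proj
        self.key = nn.Linear(rot_dim, rot_dim)
        self.value = nn.Linear(rot_dim, rot_dim)
        self.out_layer = nn.Linear(rot_dim, num_joints)
        self.gate_proj = nn.Linear(rot_dim, num_joints)   # the gated path the diff comments out

    def forward(self, xyz, rots):
        # xyz: (1, N, 3) canonical points; rots: (1, J, 3, 3) pose-corrected rotations.
        Q = self.query(xyz)                                         # (1, N, 9)
        K = self.key(rots.flatten(2))                               # (1, J, 9)
        V = self.value(rots.flatten(2))                             # (1, J, 9)
        attn = torch.softmax(Q @ K.transpose(-2, -1) / K.shape[-1] ** 0.5, dim=-1)  # (1, N, J)
        feats = attn @ V                                            # (1, N, 9) pose-aware features
        gate = torch.sigmoid(self.gate_proj(Q))                     # gating, as in the commented lines
        return torch.softmax(gate * self.out_layer(feats), dim=-1)  # (1, N, 24) skinning weights

pts = torch.rand(1, 6890, 3)                          # SMPL vertex count
rots = torch.eye(3).expand(1, 23, 3, 3).contiguous()  # 23 non-root joints
print(CrossAttentionLBS()(pts, rots).shape)           # torch.Size([1, 6890, 24])
```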
2 changes: 1 addition & 1 deletion render.py
@@ -153,7 +153,7 @@ def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParam
args.data_name = data_name
if False:
# args.exp_name=f'zju_mocap_refine/my_{args.data_name}_baseline'
args.exp_name=f'/HOME/HOME/Caixiang/GauHuman_baseline_2/output/zju_mocap_refine/my_{args.data_name}_baseline'
args.exp_name=f'/HOME/HOME/Caixiang/GauHuman_baseline/output/zju_mocap_refine/my_{args.data_name}_baseline'
args.iteration='1200'
else:
args.exp_name=f'/HOME/HOME/Caixiang/GauHuman/output/zju_mocap_refine/my_{args.data_name}_{log_name}'