Update to run quick_start.py with PyTorch 2.2 and Python 3.11 #88

Open
wants to merge 3 commits into base: master
17 changes: 14 additions & 3 deletions README.md
@@ -37,9 +37,20 @@ pip install numpy scipy matplotlib tensorboard open3d==0.9.0 opencv-python "rtre

#### For Windows user

The code has been tested on Windows 10 with CUDA 10.1. The most important difference from the Linux setup is that you need to download a Windows-compiled Rtree wheel from [here](https://www.lfd.uci.edu/~gohlke/pythonlibs/#rtree) and install it with
`pip install Rtree-0.9.4-cp37-cp37m-win_amd64.whl` (64-bit system) or
`pip install Rtree-0.9.4-cp37-cp37m-win32.whl` (32-bit system). The other libraries can be installed the same way as in the Linux setup instructions.
The code has been tested on Windows 11 with Python 3.11, PyTorch 2.2.1, and CUDA 12.4:
```powershell
# install pytorch
pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu121

# install torch geometric
pip install torch_scatter torch_sparse torch_cluster torch_spline_conv -f https://data.pyg.org/whl/torch-2.2.1+cu121.html

pip install torch_geometric==2.5.1

# install other dependencies
pip install tensorboard==2.16.2 opencv-python==4.9.0.80 rtree==1.2.0 trimesh==4.2.0 open3d==0.18.0
```
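
A quick sanity check for the setup above (a minimal sketch; it assumes only the packages installed by the commands above):

```python
# Sanity check: print installed versions and confirm the GPU is visible.
import torch
import torch_geometric
import open3d
import trimesh

print("torch:", torch.__version__)                      # expected: 2.2.1+cu121
print("CUDA available:", torch.cuda.is_available())     # expected: True on a CUDA GPU
print("torch_geometric:", torch_geometric.__version__)  # expected: 2.5.1
print("open3d:", open3d.__version__, "| trimesh:", trimesh.__version__)
```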




79 changes: 59 additions & 20 deletions models/PairCls_GCN.py
@@ -1,29 +1,36 @@
#-------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Name: PairCls_GCN.py
# Purpose: definition of connectivity prediction module.
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
import numpy as np
import torch
from models.gcn_basic_modules import MLP, GCU
from torch.nn import Sequential, Dropout, Linear
from torch_scatter import scatter_max
from torch_geometric.nn import PointConv, fps, radius, global_max_pool, knn_interpolate
from torch_geometric.nn import (
PointNetConv,
fps,
radius,
global_max_pool,
knn_interpolate,
)
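# Note: torch_geometric 2.x renamed PointConv to PointNetConv, and newer releases
# deprecate (and eventually drop) the old alias, hence the updated import and the
# PointNetConv usage in SAModule below.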


class SAModule(torch.nn.Module):
def __init__(self, ratio, r, nn):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.conv = PointConv(nn)
self.conv = PointNetConv(nn)

def forward(self, x, pos, batch):
idx = fps(pos, batch, ratio=self.ratio)
row, col = radius(pos, pos[idx], self.r, batch, batch[idx],
max_num_neighbors=64)
row, col = radius(
pos, pos[idx], self.r, batch, batch[idx], max_num_neighbors=64
)
edge_index = torch.stack([col, row], dim=0)
x = self.conv(x, (pos, pos[idx]), edge_index)
pos, batch = pos[idx], batch[idx]
@@ -58,7 +65,7 @@ def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):


class ShapeEncoder(torch.nn.Module):
def __init__(self, aggr='max'):
def __init__(self, aggr="max"):
super(ShapeEncoder, self).__init__()
self.gcu_1 = GCU(in_channels=3, out_channels=64, aggr=aggr)
self.gcu_2 = GCU(in_channels=64, out_channels=128, aggr=aggr)
@@ -77,18 +84,18 @@ def forward(self, data):
class JointEncoder(torch.nn.Module):
def __init__(self):
super(JointEncoder, self).__init__()
#self.mlp_1 = MLP([3, 64, 128, 1024])
#self.mlp_2 = MLP([1024, 256, 128])
# self.mlp_1 = MLP([3, 64, 128, 1024])
# self.mlp_2 = MLP([1024, 256, 128])

self.sa1_module_joints = SAModule(0.999, 0.4, MLP([3, 64, 64, 128]))
self.sa2_module_joints = SAModule(0.33, 0.6, MLP([128 + 3, 128, 128, 256]))
self.sa3_module_joints = GlobalSAModule(MLP([256 + 3, 256, 256, 512, 256, 128]))

def forward(self, joints, joints_batch):
'''x1 = self.mlp_1(joints_norepeat)
"""x1 = self.mlp_1(joints_norepeat)
x_glb, _ = scatter_max(x1, joints_batch, dim=0)
x_glb = self.mlp_2(x_glb)
return x_glb'''
return x_glb"""

sa0_joints = (None, joints, joints_batch)
sa1_joints = self.sa1_module_joints(*sa0_joints)
@@ -101,25 +108,57 @@ def forward(self, joints, joints_batch):
class PairCls(torch.nn.Module):
def __init__(self):
super(PairCls, self).__init__()
self.expand_joint_feature = Sequential(MLP([8, 32, 64, 128, 256]))
self.shape_encoder = ShapeEncoder()
self.joint_encoder = JointEncoder()
input_concat_dim = 448
self.mix_transform = Sequential(MLP([input_concat_dim, 128, 64]), Dropout(0.7), Linear(64, 1))
self.mix_transform = Sequential(
MLP([input_concat_dim, 128, 64]), Dropout(0.7), Linear(64, 1)
)

def forward(self, data, permute_joints=True):
joint_feature = self.joint_encoder(data.joints, data.joints_batch)
joint_feature = torch.repeat_interleave(joint_feature, torch.bincount(data.pairs_batch), dim=0)
joint_feature = torch.repeat_interleave(
joint_feature, torch.bincount(data.pairs_batch), dim=0
)
shape_feature = self.shape_encoder(data)
shape_feature = torch.repeat_interleave(shape_feature, torch.bincount(data.pairs_batch), dim=0)
shape_feature = torch.repeat_interleave(
shape_feature, torch.bincount(data.pairs_batch), dim=0
)

if permute_joints:
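# Randomly swap the order of the two joints in each candidate pair (independently
# per pair) before building the pair feature, presumably so the classifier does
# not rely on endpoint ordering.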
rand_permute = (torch.rand(len(data.pairs))>=0.5).long().to(data.pairs.device)
joints_pair = torch.cat((data.joints[torch.gather(data.pairs, dim=1, index=rand_permute.unsqueeze(dim=1)).squeeze(dim=1).long()],
data.joints[torch.gather(data.pairs, dim=1, index=1-rand_permute.unsqueeze(dim=1)).squeeze(dim=1).long()],
data.pair_attr[:, :-1]), dim=1)
rand_permute = (
(torch.rand(len(data.pairs)) >= 0.5).long().to(data.pairs.device)
)
joints_pair = torch.cat(
(
data.joints[
torch.gather(
data.pairs, dim=1, index=rand_permute.unsqueeze(dim=1)
)
.squeeze(dim=1)
.long()
],
data.joints[
torch.gather(
data.pairs, dim=1, index=1 - rand_permute.unsqueeze(dim=1)
)
.squeeze(dim=1)
.long()
],
data.pair_attr[:, :-1],
),
dim=1,
)
else:
joints_pair = torch.cat((data.joints[data.pairs[:,0].long()], data.joints[data.pairs[:,1].long()], data.pair_attr[:, :-1]), dim=1)
joints_pair = torch.cat(
(
data.joints[data.pairs[:, 0].long()],
data.joints[data.pairs[:, 1].long()],
data.pair_attr[:, :-1],
),
dim=1,
)
pair_feature = self.expand_joint_feature(joints_pair)
pair_feature = torch.cat((shape_feature, joint_feature, pair_feature), dim=1)
pre_label = self.mix_transform(pair_feature)
45 changes: 29 additions & 16 deletions models/ROOT_GCN.py
@@ -1,28 +1,37 @@
#-------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Name: Root_GCN.py
# Purpose: definition of root prediction module.
# RigNet Copyright 2020 University of Massachusetts
# RigNet is made available under General Public License Version 3 (GPLv3), or under a Commercial License.
# Please see the LICENSE README.txt file in the main directory for more information and instruction on using and licensing RigNet.
#-------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
import torch
from models.gcn_basic_modules import MLP, GCU
from torch_scatter import scatter_max
from torch.nn import Sequential, Linear
from torch_geometric.nn import PointConv, fps, radius, global_max_pool, knn_interpolate
__all__ = ['ROOTNET']
from torch_geometric.nn import (
PointNetConv,
fps,
radius,
global_max_pool,
knn_interpolate,
)

__all__ = ["ROOTNET"]


class SAModule(torch.nn.Module):
def __init__(self, ratio, r, nn):
super(SAModule, self).__init__()
self.ratio = ratio
self.r = r
self.conv = PointConv(nn)
self.conv = PointNetConv(nn)

def forward(self, x, pos, batch):
idx = fps(pos, batch, ratio=self.ratio)
row, col = radius(pos, pos[idx], self.r, batch, batch[idx],
max_num_neighbors=64)
row, col = radius(
pos, pos[idx], self.r, batch, batch[idx], max_num_neighbors=64
)
edge_index = torch.stack([col, row], dim=0)
x = self.conv(x, (pos, pos[idx]), edge_index)
pos, batch = pos[idx], batch[idx]
@@ -57,13 +66,13 @@ def forward(self, x, pos, batch, x_skip, pos_skip, batch_skip):


class ShapeEncoder(torch.nn.Module):
def __init__(self, aggr='max'):
def __init__(self, aggr="max"):
super(ShapeEncoder, self).__init__()
self.gcu_1 = GCU(in_channels=3, out_channels=64, aggr=aggr)
self.gcu_2 = GCU(in_channels=64, out_channels=128, aggr=aggr)
self.gcu_3 = GCU(in_channels=128, out_channels=256, aggr=aggr)
self.mlp_glb = MLP([(64 + 128 + 256), 128])
#self.mlp_glb = MLP([(64 + 128 + 256), 512])
# self.mlp_glb = MLP([(64 + 128 + 256), 512])

def forward(self, data):
x_1 = self.gcu_1(data.pos, data.tpl_edge_index, data.geo_edge_index)
@@ -77,9 +86,9 @@ def forward(self, data):
class JointEncoder(torch.nn.Module):
def __init__(self):
super(JointEncoder, self).__init__()
'''self.mlp_1 = MLP([4, 64])
"""self.mlp_1 = MLP([4, 64])
self.mlp_2 = MLP([64, 128, 1024])
self.mlp_3 = MLP([1088, 512, 256, 128, 64])'''
self.mlp_3 = MLP([1088, 512, 256, 128, 64])"""
self.sa1_joint = SAModule(0.999, 0.4, MLP([4, 64, 64, 128]))
self.sa2_joint = SAModule(0.33, 0.6, MLP([128 + 3, 128, 128, 256]))
self.sa3_joint = GlobalSAModule(MLP([256 + 3, 256, 256, 512]))
@@ -88,12 +97,12 @@ def __init__(self):
self.fp1_joint = FPModule(3, MLP([128 + 1, 128, 128]))

def forward(self, x, pos, batch):
'''x1= self.mlp_1(torch.cat((pos, x), dim=1))
"""x1= self.mlp_1(torch.cat((pos, x), dim=1))
x2 = self.mlp_2(x1)
x_glb, _ = scatter_max(x2, batch, dim=0)
x_glb = torch.repeat_interleave(x_glb, torch.bincount(batch), dim=0)
x3 = self.mlp_3(torch.cat((x_glb, x1), dim=1))
return x3'''
return x3"""
sa0_joint = (x, pos, batch)
sa1_joint = self.sa1_joint(*sa0_joint)
sa2_joint = self.sa2_joint(*sa1_joint)
@@ -115,7 +124,7 @@ def forward(self, data, shuffle=True):
joints_label = []
joints_shuffle = []
for i in range(len(torch.unique(data.joints_batch))):
joint_i = data.joints[data.joints_batch==i]
joint_i = data.joints[data.joints_batch == i]
label_i = joint_i.new(torch.Size((joint_i.shape[0], 1))).zero_()
label_i[0, 0] = 1
# random shuffle
@@ -129,8 +138,12 @@
joints_label = torch.cat(joints_label)

x_glb_shape = self.shape_encoder(data)
shape_feature = torch.repeat_interleave(x_glb_shape, torch.bincount(data.joints_batch), dim=0)
joint_feature = self.joint_encoder(torch.abs(joints_shuffle[:,0:1]), joints_shuffle, data.joints_batch)
shape_feature = torch.repeat_interleave(
x_glb_shape, torch.bincount(data.joints_batch), dim=0
)
joint_feature = self.joint_encoder(
torch.abs(joints_shuffle[:, 0:1]), joints_shuffle, data.joints_batch
)
x_joint = torch.cat([shape_feature, joint_feature], dim=1)
x_joint = self.back_layers(x_joint)
return x_joint, joints_label