PyTorch frontend: fix handling of duplicate use of a model weight #5897

Merged
merged 1 commit on Jun 24, 2020
python/tvm/relay/frontend/pytorch.py (13 changes: 9 additions & 4 deletions)
@@ -2335,6 +2335,7 @@ def convert_params(graph, state_dict):
     params = {}
     param_tensors = {}
     packed_param_map = {}
+    vars_by_name = {}
     seen = set()
 
     for node in getattr_nodes:
@@ -2352,10 +2353,14 @@
                 assert full_attr in state_dict, err_msg
                 packed_param_map[full_attr_node_name] = full_attr
             elif full_attr in state_dict:
-                torch_tensor = state_dict[full_attr]
-                tensor, var = _get_tensor_and_var(torch_tensor,
-                                                  full_attr)
-                param_tensors[full_attr] = tensor
+                if full_attr in vars_by_name:
+                    var = vars_by_name[full_attr]
+                else:
+                    torch_tensor = state_dict[full_attr]
+                    tensor, var = _get_tensor_and_var(torch_tensor,
+                                                      full_attr)
+                    param_tensors[full_attr] = tensor
+                    vars_by_name[full_attr] = var
                 params[full_attr_node_name] = var
 
     return params, param_tensors, packed_param_map
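For reference, a minimal sketch of the caching idea the hunk above introduces, taken out of the frontend's graph traversal: every GetAttr node that resolves to the same state_dict key gets the same Relay variable, and the tensor is converted only on first use. The helper name vars_for_shared_weights and the (node_name, weight_name) input pairs are illustrative assumptions, not frontend API.

    # Illustrative sketch only, not the TVM frontend code.
    from tvm import relay

    def vars_for_shared_weights(uses, state_dict):
        vars_by_name = {}  # weight name -> the single relay.Var for that weight
        params = {}        # GetAttr node name -> shared var (several keys may map to one var)
        tensors = {}       # weight name -> numpy value, converted exactly once
        for node_name, weight_name in uses:
            if weight_name not in vars_by_name:
                value = state_dict[weight_name].cpu().numpy()
                vars_by_name[weight_name] = relay.var(
                    weight_name, shape=value.shape, dtype=str(value.dtype))
                tensors[weight_name] = value
            params[node_name] = vars_by_name[weight_name]
        return params, tensors

The elif branch above does the same thing inline, keyed by full_attr, so repeated uses of one weight no longer create duplicate variables with the same name.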
tests/python/frontend/pytorch/test_forward.py (18 changes: 18 additions & 0 deletions)
@@ -2390,6 +2390,23 @@ def test_weight_names():
     assert set(params.keys()) == set(n for n, p in tm.named_parameters())
 
 
+def test_duplicate_weight_use():
+    # The test case doesn't make sense as a neural network;
+    # the issue popped up in the shared input/output embeddings of BERT,
+    # but this model is quicker to build.
+    class Test(Module):
+        def __init__(self):
+            super().__init__()
+            self.lin = torch.nn.Linear(5, 3)
+
+        def forward(self, x):
+            x = self.lin(x)
+            x = x @ self.lin.weight
+            return x
+
+    verify_model(Test(), input_data=[torch.randn(5, 5)])
+
+
 def test_forward_matmul():
     torch.set_grad_enabled(False)
 
@@ -2556,6 +2573,7 @@ def test_forward_pretrained_bert_base_uncased():
     test_forward_traced_function()
     test_forward_dtypes()
     test_weight_names()
+    test_duplicate_weight_use()
 
     # Single operator tests
     test_forward_add()
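As a rough end-to-end usage sketch (not part of the PR), this is how a traced model with the duplicate-weight pattern from the test above would be imported; the input name "input0", the shapes, and running on a TVM build that includes this fix are assumptions.

    # Hedged sketch: import a model that consumes one weight twice.
    import torch
    from tvm import relay

    class Tied(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(5, 3)

        def forward(self, x):
            # "lin.weight" is consumed twice: inside the Linear and in the matmul
            return self.lin(x) @ self.lin.weight

    inp = torch.randn(5, 5)
    scripted = torch.jit.trace(Tied().eval(), inp)
    mod, params = relay.frontend.from_pytorch(scripted, [("input0", (5, 5))])
    # With the fix, both uses are wired to a single Relay variable; previously
    # each use produced its own variable named "lin.weight".
    print(sorted(params.keys()))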