
performance issue #2

Open · soans1994 opened this issue Sep 23, 2021 · 0 comments

soans1994 commented Sep 23, 2021

Hello author,

I have implemented a similar FCN with VGG layers, but my FCN-32 performs better than FCN-16 and FCN-8. Why is this? Could you please take a look at my code?
Thank you.

import torch
import torch.nn as nn
import torchvision.models as models
from pytorch_model_summary import summary

vgg16 = models.vgg16(pretrained=True)
for param in vgg16.features.parameters():
    param.requires_grad = False
# requires_grad=False: Total params: 185,771,904, Trainable params: 171,057,216, Non-trainable params: 14,714,688
# requires_grad=True:  Total params: 185,771,904, Trainable params: 185,771,904, Non-trainable params: 0
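# Sanity check (a sketch, not part of the original snippet): the non-trainable
# figure quoted above should equal the parameter count of the frozen vgg16.features.
frozen_params = sum(p.numel() for p in vgg16.features.parameters())
still_trainable = sum(p.numel() for p in vgg16.features.parameters() if p.requires_grad)
print(frozen_params)     # 14,714,688
print(still_trainable)   # 0 after the requires_grad loop above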

class fcn(nn.Module):
    def __init__(self):
        super(fcn, self).__init__()
        self.features = vgg16.features
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, 7),
            nn.ReLU(inplace=True),
            #nn.Dropout2d(),
            nn.Conv2d(4096, 4096, 1),
            nn.ReLU(inplace=True),
            #nn.Dropout2d(),
            nn.Conv2d(4096, 32, 1),
            nn.ConvTranspose2d(32, 32, 224, stride=32)
        )

    def forward(self, x):
        x = self.features(x)  # /32
        x = self.classifier(x)
        #print(x.shape)
        return x

class fcn16(nn.Module):
    def __init__(self):
        super(fcn16, self).__init__()
        self.features = vgg16.features
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, 7),
            nn.ReLU(inplace=True),
            nn.Conv2d(4096, 4096, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(4096, 32, 1)
        )
        self.score_pool4 = nn.Conv2d(512, 32, 1)
        self.upscore2 = nn.ConvTranspose2d(32, 32, 14, stride=2, bias=False)
        self.upscore16 = nn.ConvTranspose2d(32, 32, 16, stride=16, bias=False)

    def forward(self, x):
        pool4 = self.features[:-7](x)      # 512 features, /16
        pool5 = self.features[-7:](pool4)  # 512 features, /16/2 = /32
        pool5_upscored = self.upscore2(self.classifier(pool5))  # 32 class features, stride 2, /32*2 = /16
        pool4_scored = self.score_pool4(pool4)  # 32 features, /16
        combined = pool4_scored + pool5_upscored
        #combined = torch.cat([pool4_scored, pool5_upscored])
        res = self.upscore16(combined)  # /1
        return res

class fcn8(nn.Module):
    def __init__(self):
        super(fcn8, self).__init__()
        self.features = vgg16.features
        self.classifier = nn.Sequential(
            nn.Conv2d(512, 4096, 7),
            nn.ReLU(inplace=True),
            nn.Conv2d(4096, 4096, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(4096, 32, 1)
        )
        self.score_pool4 = nn.Conv2d(512, 32, 1)
        self.score_pool3 = nn.Conv2d(256, 32, 1)
        self.upscore2 = nn.ConvTranspose2d(32, 32, 14, stride=2, bias=False)
        self.upscore3 = nn.ConvTranspose2d(32, 32, 2, stride=2, bias=False)
        #self.upscore16 = nn.ConvTranspose2d(32, 32, 16, stride=16, bias=False)
        self.upscore8 = nn.ConvTranspose2d(32, 32, 8, stride=8, bias=False)

    def forward(self, x):
        pool3 = self.features[:-14](x)        # 256 features, /8
        pool4 = self.features[-14:-7](pool3)  # 512 features, /8/2 = /16
        pool5 = self.features[-7:](pool4)     # 512 features, /16/2 = /32
        pool5_upscored = self.upscore2(self.classifier(pool5))  # 32 class features, stride 2, /32*2 = /16
        pool4_scored = self.score_pool4(pool4)  # 32 class features, /16
        pool3_scored = self.score_pool3(pool3)  # 32 class features, /8
        combined = pool4_scored + pool5_upscored  # /16
        #print(combined.shape)
        combined_upscored = self.upscore3(combined)  # 32 class features, stride 2, /16*2 = /8
        #print(combined_upscored.shape)
        combined2 = pool3_scored + combined_upscored
        #print(combined2.shape)
        #res = self.upscore16(combined)  # /1
        res = self.upscore8(combined2)  # /1
        #print(res.shape)
        return res
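
As a quick sanity check (a minimal sketch, assuming 224×224 RGB inputs and a batch of 1, as the sizes in the comments above suggest), all three variants should map the input back to a 224×224, 32-channel score map:

# Shape check: each model should return a [1, 32, 224, 224] tensor for a 224x224 input.
x = torch.randn(1, 3, 224, 224)
for model in (fcn(), fcn16(), fcn8()):
    with torch.no_grad():
        out = model(x)
    print(type(model).__name__, out.shape)  # expected: torch.Size([1, 32, 224, 224])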
