Merge pull request #454 from cpaxton/feature/scripts-conditional
Updates to scripts and to conditional image tests
cpaxton authored Feb 23, 2018
2 parents f874953 + d3b5a80 commit 54a0e55
Showing 41 changed files with 313 additions and 202 deletions.
11 changes: 10 additions & 1 deletion .gitignore
@@ -26,4 +26,13 @@
*.h5
*.so
*.ttt
*.json
*.json

# Tags file
tags

# Slurm files
slurm-*.out

# orig files from vim
*.orig
44 changes: 44 additions & 0 deletions commands/multi_secondaries_no_ros.sh
@@ -0,0 +1,44 @@
#!/usr/bin/env bash
echo "------------------------------------"
echo "Running $0"
echo "This will train the following:"
echo " - V(x)"
echo " - p(action | x)"
echo " - pi(x, action) ~ goal_pose"
echo " - pi(x, action) ~ u"
echo "------------------------------------"
./costar_models/scripts/ctp_model_tool \
--model secondary --data_file data.h5f \
--epochs 50 \
--lr 0.001 --dropout_rate 0.1 \
--features multi --batch_size 64 \
--steps_per_epoch 500 --submodel value $1

./costar_models/scripts/ctp_model_tool \
--model secondary --data_file data.h5f \
--epochs 50 \
--lr 0.001 --dropout_rate 0.1 --features multi \
--batch_size 64 --steps_per_epoch 500 --submodel next $1

./costar_models/scripts/ctp_model_tool \
--model secondary --data_file data.h5f \
--epochs 50 \
--lr 0.001 --dropout_rate 0.1 --features multi \
--batch_size 64 --steps_per_epoch 500 --submodel q $1

./costar_models/scripts/ctp_model_tool \
--model secondary --data_file data.h5f \
--epochs 100 \
--lr 0.001 --dropout_rate 0.1 \
--success_only \
--features multi --batch_size 64 \
--steps_per_epoch 500 --submodel actor $1

./costar_models/scripts/ctp_model_tool \
--model secondary --data_file data.h5f \
--epochs 200 \
--lr 0.001 --dropout_rate 0.1 \
--features multi --batch_size 64 \
--success_only \
--steps_per_epoch 500 --submodel pose $1
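
For reference, the same five-stage schedule expressed as a short Python driver. The flags are copied from the script above; extra_args stands in for whatever single argument would be forwarded as $1 (this sketch is illustrative, not part of the commit).

    import subprocess

    # Five secondary training stages, in the same order as the script:
    # (submodel, epochs, train on successful episodes only)
    stages = [("value", 50, False), ("next", 50, False), ("q", 50, False),
              ("actor", 100, True), ("pose", 200, True)]
    extra_args = []  # e.g. one extra flag, like the script's $1

    for submodel, epochs, success_only in stages:
        cmd = ["./costar_models/scripts/ctp_model_tool",
               "--model", "secondary", "--data_file", "data.h5f",
               "--epochs", str(epochs),
               "--lr", "0.001", "--dropout_rate", "0.1",
               "--features", "multi", "--batch_size", "64",
               "--steps_per_epoch", "500", "--submodel", submodel]
        if success_only:
            cmd.append("--success_only")
        subprocess.check_call(cmd + extra_args)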

33 changes: 22 additions & 11 deletions costar_models/python/costar_models/abstract.py
@@ -19,9 +19,12 @@ class AbstractAgentBasedModel(object):
'''

def makeName(self, prefix, submodel=None):
name = os.path.join(self.model_directory, prefix) + "_model"
dir = self.model_directory
if submodel is not None and self.reqs_directory is not None:
dir = self.reqs_directory
name = os.path.join(dir, prefix) + "_model"
if self.features is not None:
name += "_%s"%self.features
name += "_%s"%self.features
if submodel is not None:
name += "_%s.h5f"%submodel
return name
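
For reference, a standalone sketch of the naming scheme above; the directory, feature, and submodel values in the final comment are illustrative, not taken from the diff.

    import os

    def make_name(model_directory, reqs_directory, prefix,
                  features=None, submodel=None):
        # Mirror of makeName: secondary submodel weights resolve against
        # reqs_directory when one is configured; everything else resolves
        # against model_directory.
        directory = model_directory
        if submodel is not None and reqs_directory is not None:
            directory = reqs_directory
        name = os.path.join(directory, prefix) + "_model"
        if features is not None:
            name += "_%s" % features
        if submodel is not None:
            name += "_%s.h5f" % submodel
        return name

    # make_name("./models", "./reqs", "secondary", "multi", "value")
    # -> "./reqs/secondary_model_multi_value.h5f"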
@@ -65,7 +68,7 @@ def __init__(self, taskdef=None,
use_batchnorm=1,
hypothesis_dropout=False,
dense_representation=True,
skip_connections=0,
skip_connections=1,
use_noise=False,
load_pretrained_weights=False,
retrain=True,
@@ -81,8 +84,12 @@ def __init__(self, taskdef=None,
clip_weights=0, use_wasserstein=False,
option_num=None, # for policy model
unique_id="", # for status file
task=None, robot=None, model="", model_directory="./", *args,
**kwargs):
task=None,
robot=None,
model="",
model_directory="./",
reqs_directory=None,
*args, **kwargs):

if lr == 0 or lr < 1e-30:
raise RuntimeError('You probably did not mean to set ' + \
@@ -124,6 +131,9 @@ def __init__(self, taskdef=None,
self.clipnorm = float(clipnorm)
self.taskdef = taskdef
self.model_directory = os.path.expanduser(model_directory)
self.reqs_directory = None
if reqs_directory is not None:
self.reqs_directory = os.path.expanduser(reqs_directory)
self.name = self.makeName(self.name_prefix)
self.num_generator_files = num_generator_files
self.dropout_rate = dropout_rate
@@ -143,7 +153,7 @@ def __init__(self, taskdef=None,
self.save_model = save_model
self.hidden_size = hidden_size
self.option_num = option_num

if self.noise_dim < 1:
self.use_noise = False

@@ -160,7 +170,7 @@ def __init__(self, taskdef=None,
# Unique id for status file
self.unique_id = unique_id



# default: store the whole model here.
# NOTE: this may not actually be where you want to save it.
@@ -175,6 +185,7 @@ def __init__(self, taskdef=None,
print("Task =", self.task)
print("Model type =", model)
print("Model directory =", self.model_directory)
print("Reqs directory =", self.reqs_directory)
print("Models saved with prefix =", self.name)
print("Unique id for status file =", self.unique_id)
print("-----------------------------------------------------------")
@@ -239,9 +250,9 @@ def trainFromGenerators(self, train_generator, test_generator, data=None):
def _getData(self, *args, **kwargs):
'''
This function should process all the data you need for a generator.
'''
'''
raise NotImplementedError('_getData() requires a dataset.')

def trainGenerator(self, dataset):
return self._yieldLoop(dataset.sampleTrain)

@@ -254,7 +265,7 @@ def testGenerator(self, dataset):

def _yieldLoop(self, sampleFn):
'''
This helper function runs in a loop infinitely, executing some callable
This helper function runs in a loop infinitely, executing some callable
to extract a set of feature information from a dataset file, and then
performs any necessary preprocessing on it.
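
The docstring above states the generator contract; a minimal sketch of that pattern follows, where sample_fn and preprocess_fn are placeholders rather than the project's actual callables.

    def yield_loop(sample_fn, preprocess_fn):
        # Loop forever: draw one batch of raw features from the dataset file,
        # run any preprocessing, and yield the result to Keras fit_generator.
        while True:
            features = sample_fn()
            yield preprocess_fn(features)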
@@ -430,7 +441,7 @@ def __init__(self, taskdef, *args, **kwargs):
self.baseline = None
# All low-level policies pi(x,o) --> u
self.policies = []

def _makeSupervisor(self, feature):
'''
This needs to create a supervisor. This one maps from input to the
29 changes: 15 additions & 14 deletions costar_models/python/costar_models/conditional_image.py
@@ -47,7 +47,6 @@ def __init__(self, *args, **kwargs):
self.rep_size = 256
self.num_transforms = 3
self.transform_model = None
self.skip_connections = False
self.save_encoder_decoder = self.retrain

if self.use_noise:
@@ -73,22 +72,15 @@ def _makePredictor(self, features):
label_in = Input((1,))
ins = [img0_in, img_in]

if self.skip_connections:
encoder = self._makeImageEncoder2(img_shape)
decoder = self._makeImageDecoder2(self.hidden_shape)
else:
encoder = self._makeImageEncoder(img_shape)
decoder = self._makeImageDecoder(self.hidden_shape)
encoder = self._makeImageEncoder(img_shape)
decoder = self._makeImageDecoder(self.hidden_shape)

LoadEncoderWeights(self, encoder, decoder)

# =====================================================================
# Load the arm and gripper representation
if self.skip_connections:
h, s32, s16, s8 = encoder([img0_in, img_in])
else:
h = encoder([img_in])
h0 = encoder(img0_in)
h = encoder([img_in])
h0 = encoder(img0_in)

if self.validate:
self.loadValidationModels(arm_size, gripper_size, h0, h)
@@ -132,7 +124,7 @@ def _makePredictor(self, features):
if self.no_disc:
disc_wt = 0.
else:
disc_wt = 1e-4
disc_wt = 1e-3
if self.no_disc:
train_predictor = Model(ins + [label_in],
[image_out, image_out2] + enc_outs)
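
The hunk above only sets disc_wt. As a generic, self-contained illustration of how such a small weight typically enters a Keras multi-output compile call (the losses, layer sizes, and placement below are assumptions, not this model's actual configuration):

    from keras.layers import Input, Dense
    from keras.models import Model

    disc_wt = 1e-3  # small weight keeps the reconstruction losses dominant
    x = Input((8,))
    out_a = Dense(4, name="recon_a")(x)
    out_b = Dense(4, name="recon_b")(x)
    is_fake = Dense(1, activation="sigmoid", name="disc")(x)
    toy = Model(x, [out_a, out_b, is_fake])
    toy.compile(optimizer="adam",
                loss=["mae", "mae", "binary_crossentropy"],
                loss_weights=[1., 1., disc_wt])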
@@ -185,26 +177,31 @@ def loadValidationModels(self, arm_size, gripper_size, h0, h):
arm_gripper = Concatenate()([arm_in, gripper_in])
label_in = Input((1,))

self.value_model = GetValueModel(h, self.num_options, 64,
print(">>> VALUE MODEL")
self.value_model = GetValueModel(h, self.num_options, 128,
self.decoder_dropout_rate)
self.value_model.compile(loss="mae", optimizer=self.getOptimizer())
self.value_model.load_weights(self.makeName("secondary", "value"))

print(">>> NEXT MODEL")
self.next_model = GetNextModel(h, self.num_options, 128,
self.decoder_dropout_rate)
self.next_model.compile(loss="mae", optimizer=self.getOptimizer())
self.next_model.load_weights(self.makeName("secondary", "next"))

print(">>> ACTOR MODEL")
self.actor = GetActorModel(h, self.num_options, arm_size, gripper_size,
self.decoder_dropout_rate)
self.actor.compile(loss="mae",optimizer=self.getOptimizer())
self.actor.load_weights(self.makeName("secondary", "actor"))

print(">>> POSE MODEL")
self.pose_model = GetPoseModel(h, self.num_options, arm_size, gripper_size,
self.decoder_dropout_rate)
self.pose_model.compile(loss="mae",optimizer=self.getOptimizer())
self.pose_model.load_weights(self.makeName("secondary", "pose"))

print(">>> Q MODEL")
self.q_model = GetNextModel(h, self.num_options, 128,
self.decoder_dropout_rate)
self.q_model.compile(loss="mae", optimizer=self.getOptimizer())
@@ -238,6 +235,10 @@ def pnext(self, hidden0, hidden, prev_option):
#p /= np.sum(p)
return p

def q(self, hidden0, hidden, prev_option):
p = self.q_model.predict([hidden0, hidden, prev_option])
return p

def value(self, hidden0, hidden):
#v = self.value_model.predict([h0, hidden, prev_option])
v = self.value_model.predict([hidden0, hidden])
29 changes: 11 additions & 18 deletions costar_models/python/costar_models/conditional_image_gan.py
@@ -46,7 +46,6 @@ def __init__(self, *args, **kwargs):
self.rep_size = 256
self.num_transforms = 3
self.do_all = True
self.skip_connections = False
self.save_encoder_decoder = self.retrain
self.noise_iters = 2

@@ -71,12 +70,8 @@ def _makePredictor(self, features):
next_option2_in = Input((1,), name="next_option2_in")
ins = [img0_in, img_in, next_option_in, next_option2_in]

if self.skip_connections:
encoder = self._makeImageEncoder2(img_shape)
decoder = self._makeImageDecoder2(self.hidden_shape)
else:
encoder = self._makeImageEncoder(img_shape)
decoder = self._makeImageDecoder(self.hidden_shape)
encoder = self._makeImageEncoder(img_shape)
decoder = self._makeImageDecoder(self.hidden_shape)

LoadEncoderWeights(self, encoder, decoder, gan=True)

@@ -87,11 +82,8 @@ def _makePredictor(self, features):
z2 = Input((self.noise_dim,), name="z2_in")
ins += [z1, z2]

if self.skip_connections:
h, s32, s16, s8 = encoder([img0_in, img_in])
else:
h = encoder([img_in])
h0 = encoder(img0_in)
h = encoder([img_in])
h0 = encoder(img0_in)

# =====================================================================
# Actually get the right outputs
@@ -132,7 +124,7 @@ def _makePredictor(self, features):
# =====================================================================
# And adversarial model
loss = wasserstein_loss if self.use_wasserstein else "binary_crossentropy"
weights = [0.01, 0.01, 1.] if self.use_wasserstein else [100., 100., 1.]
weights = [0.1, 0.1, 1.] if self.use_wasserstein else [100., 100., 1.]

model = Model(ins, [image_out, image_out2, is_fake])
model.compile(
@@ -175,8 +167,7 @@ def _makeImageDiscriminator(self, img_shape):
option = Input((1,),name="disc_options")
option2 = Input((1,),name="disc2_options")
ins = [img0, img, option, option2, img_goal, img_goal2]
dr = self.dropout_rate
dr = 0
dr = self.dropout_rate*0.

x0 = AddConv2D(img0, 64, [4,4], 1, dr, "same", lrelu=True, bn=False)
xobs = AddConv2D(img, 64, [4,4], 1, dr, "same", lrelu=True, bn=False)
@@ -209,9 +200,11 @@ def _makeImageDiscriminator(self, img_shape):
x = Flatten()(x)
x = AddDense(x, 1, "linear", 0., output=True, bn=False)
else:
x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
bn=False)
x = GlobalAveragePooling2D()(x)
#x = AddConv2D(x, 1, [1,1], 1, 0., "same", activation="sigmoid",
# bn=False)
#x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = AddDense(x, 1, "sigmoid", 0., output=True, bn=False)

discrim = Model(ins, x, name="image_discriminator")
self.lr *= 2.
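
The new discriminator output replaces the 1x1 convolution plus global average pooling with a flatten and a single sigmoid unit. A standalone Keras sketch of that head, with an illustrative input shape and without the dropout/batchnorm handling of the AddDense helper:

    from keras.layers import Input, Flatten, Dense
    from keras.models import Model

    feat = Input((8, 8, 64))               # illustrative final feature map shape
    x = Flatten()(feat)                    # flatten instead of pooling per patch
    x = Dense(1, activation="sigmoid")(x)  # single real/fake score
    head = Model(feat, x, name="disc_head_sketch")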