Skip to content

Commit

Permalink
Remove the learning-rate logging hook and change the CPU worker flag's default to false
Browse files Browse the repository at this point in the history
  • Loading branch information
macanv committed Feb 11, 2019
1 parent a286935 commit f221eb0
Show file tree
Hide file tree
Showing 3 changed files with 2 additions and 2 deletions.
1 change: 1 addition & 0 deletions bert_base/server/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -424,6 +424,7 @@ def ner_model_fn(features, labels, mode, params):
'encodes': pred_ids[0]
})

# 0 表示只使用CPU 1 表示使用GPU
config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
Expand Down
2 changes: 1 addition & 1 deletion bert_base/server/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def get_args_parser():
group3.add_argument('-priority_batch_size', type=int, default=16,
help='batch smaller than this size will be labeled as high priority,'
'and jumps forward in the job queue')
group3.add_argument('-cpu', action='store_true', default=True,
group3.add_argument('-cpu', action='store_true', default=False,
help='running on CPU (default on GPU)')
group3.add_argument('-xla', action='store_true', default=False,
help='enable XLA compiler (experimental)')
Expand Down
1 change: 0 additions & 1 deletion bert_base/train/bert_lstm_ner.py
Original file line number Diff line number Diff line change
Expand Up @@ -410,7 +410,6 @@ def model_fn(features, labels, mode, params):
total_loss, learning_rate, num_train_steps, num_warmup_steps, False)
hook_dict = {}
hook_dict['loss'] = total_loss
hook_dict['learning_rate'] = learning_rate
hook_dict['global_steps'] = tf.train.get_or_create_global_step()
logging_hook = tf.train.LoggingTensorHook(
hook_dict, every_n_iter=args.save_summary_steps)
Expand Down

0 comments on commit f221eb0

Please sign in to comment.