Commit 81d867e

More fix

reminisce committed Aug 11, 2017
1 parent 5279aa1, commit 81d867e
Showing 2 changed files with 11 additions and 5 deletions.

benchmark/python/sparse_end2end.py (8 additions, 4 deletions)
@@ -79,7 +79,6 @@ def next(self):


 def get_sym(feature_dim):
-    initializer = mx.initializer.Normal()
     x = mx.symbol.Variable("data", stype='csr')
     norm_init = mx.initializer.Normal(sigma=0.01)
     w = mx.symbol.Variable("w", shape=(feature_dim, args.output_dim), init=norm_init, stype='row_sparse')
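For context, a minimal sketch of how a symbol like this is assembled; the diff shows only the variable declarations, so the `dot` call and the concrete `feature_dim`/`output_dim` values below are assumptions:

    import mxnet as mx

    # Hypothetical stand-ins for feature_dim and args.output_dim.
    feature_dim, output_dim = 1000, 2
    x = mx.symbol.Variable("data", stype='csr')        # sparse CSR input
    norm_init = mx.initializer.Normal(sigma=0.01)
    w = mx.symbol.Variable("w", shape=(feature_dim, output_dim),
                           init=norm_init, stype='row_sparse')
    # Assumed continuation: multiply the sparse input by the row-sparse weight.
    out = mx.symbol.dot(x, w)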
@@ -97,7 +96,7 @@ def get_sym(feature_dim):
     num_batch = args.num_batch
     kvstore = args.kvstore
     profiler = args.profiler > 0
-    batch_size = args.batch_size
+    batch_size = args.batch_size if args.num_gpu == 0 else args.num_gpu * args.batch_size
     dummy_iter = args.dummy_iter
     dataset = args.dataset
     log_level = args.log_level
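A quick worked example of the new batch-size logic; the numbers are hypothetical, and the reading (consistent with data-parallel training) is that args.batch_size is per device:

    # Stand-ins for args.num_gpu and args.batch_size.
    num_gpu, per_device_batch = 4, 128
    batch_size = per_device_batch if num_gpu == 0 else num_gpu * per_device_batch
    print(batch_size)  # 512: the iterator emits 512 rows, 128 per GPU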
@@ -160,8 +159,11 @@ def get_sym(feature_dim):

     # start profiler
     if profiler:
-        import random
-        name = 'profile_output_' + str(num_worker) + '.json'
+        device = 'cpu'
+        if args.num_gpu > 0:
+            device = 'gpu' + str(args.num_gpu)
+        name = 'profile_' + args.dataset + '_' + device + '_nworker' + str(num_worker)\
+               + '_batchsize' + str(args.batch_size) + '_outdim' + str(args.output_dim) + '.json'
         mx.profiler.profiler_set_config(mode='all', filename=name)
         mx.profiler.profiler_set_state('run')
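The profiler calls above fit together as follows; a minimal sketch using the same pre-1.0 MXNet profiler API as the diff, with a hypothetical filename:

    import mxnet as mx

    mx.profiler.profiler_set_config(mode='all', filename='profile_worker0.json')
    mx.profiler.profiler_set_state('run')    # start recording
    # ... the code to be profiled (here, the training loop) ...
    mx.profiler.profiler_set_state('stop')   # stop and flush the trace to the JSON file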

@@ -207,6 +209,8 @@ def get_sym(feature_dim):
             # accumulate prediction accuracy
             mod.update_metric(metric, batch.label)
         logging.info('epoch %d, %s' % (epoch, metric.get()))
+        if epoch == 0:
+            print "num_batches = ", nbatch
     if profiler:
         mx.profiler.profiler_set_state('stop')
     end = time.time()
src/executor/graph_executor.cc (3 additions, 1 deletion)
@@ -1151,7 +1151,9 @@ void GraphExecutor::InitDataEntryMemory(std::vector<NDArray>* shared_pool) {
       CHECK_LE(nword, std::numeric_limits<nnvm::dim_t>::max());
       // allocate float arrays
       TShape shape{static_cast<nnvm::dim_t>(nword)};
-      NDArray nd(shape, ctx);
+      // TODO(junwu): adding delay_alloc=true to create nd
+      // is a temporary solution.
+      NDArray nd(shape, ctx, true);
       data_pool_[i] = nd;
       // put the new allocated arrays to shared pool
       if (shared_pool != nullptr) {
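For context, the new third constructor argument is NDArray's delay_alloc flag; a minimal sketch of the two variants, assuming MXNet's C++ headers (not a standalone program):

    #include <mxnet/ndarray.h>

    void Sketch(const mxnet::TShape& shape, const mxnet::Context& ctx) {
      // Eager: backing memory is allocated as soon as the array is constructed.
      mxnet::NDArray eager(shape, ctx);
      // Delayed: delay_alloc=true defers the allocation until the buffer is
      // actually written, so pool entries that end up shared cost nothing up front.
      mxnet::NDArray delayed(shape, ctx, /*delay_alloc=*/true);
    }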
