diff --git a/docs/tutorials/index.md b/docs/tutorials/index.md
index 57bfec7cddda..ae0851425be0 100644
--- a/docs/tutorials/index.md
+++ b/docs/tutorials/index.md
@@ -154,6 +154,17 @@ Select API:
+## Perl Tutorials
+
+* Getting Started
+ * [Machine learning in Perl](http://blogs.perl.org/users/sergey_kolychev/2017/02/machine-learning-in-perl.html)
+ * [Calculator and Robo-Shakespeare](http://blogs.perl.org/users/sergey_kolychev/2017/04/machine-learning-in-perl-part2-a-calculator-handwritten-digits-and-roboshakespeare.html)
+* Gluon
+ * [DCGAN](http://blogs.perl.org/users/sergey_kolychev/2017/10/machine-learning-in-perl-part3-deep-convolutional-generative-adversarial-network.html)
+ * [Image classification and Style transfer](http://blogs.perl.org/users/sergey_kolychev/2018/07/machine-learning-in-perl-kyuubi-goes-to-a-modelzoo-during-the-starry-night.html)
+
+
+
## Contributing Tutorials
We really appreciate contributions, and tutorials are a great way to share your knowledge and help the community. After you have followed [these steps](https://github.com/apache/incubator-mxnet/tree/master/example#contributing), please submit a pull request on Github.
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/Changes b/perl-package/AI-MXNet-Gluon-Contrib/Changes
index 7b3b27a3722c..81e55aa753ab 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/Changes
+++ b/perl-package/AI-MXNet-Gluon-Contrib/Changes
@@ -1,5 +1,11 @@
Revision history for Perl extension AI::MXNet::Gluon::Contrib
+1.32 Sun Jul 15 12:12:15 PDT 2018
+ - Missing POD fixes.
+
+1.31 Sat Jul 14 08:33:21 PDT 2018
+ - Fixed CPAN indexing issue.
+
1.3 Tue Jul 10 21:19:13 PDT 2018
- Initial release
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/META.json b/perl-package/AI-MXNet-Gluon-Contrib/META.json
index 52c32309879d..ec65bb01348e 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/META.json
+++ b/perl-package/AI-MXNet-Gluon-Contrib/META.json
@@ -38,5 +38,5 @@
}
},
"release_status" : "stable",
- "version" : "1.3"
+ "version" : "1.32"
}
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/META.yml b/perl-package/AI-MXNet-Gluon-Contrib/META.yml
index b059f0f51bfd..aaa194debae9 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/META.yml
+++ b/perl-package/AI-MXNet-Gluon-Contrib/META.yml
@@ -18,4 +18,4 @@ no_index:
- inc
requires:
AI::MXNet: '1.31'
-version: '1.3'
+version: '1.32'
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/Makefile.PL b/perl-package/AI-MXNet-Gluon-Contrib/Makefile.PL
index b27d59b3b558..6c58d6ea8669 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/Makefile.PL
+++ b/perl-package/AI-MXNet-Gluon-Contrib/Makefile.PL
@@ -39,7 +39,7 @@ my %WriteMakefileArgs = (
"AI::MXNet" => "1.31",
},
"TEST_REQUIRES" => {},
- "VERSION" => "1.3",
+ "VERSION" => "1.32",
"test" => {
"TESTS" => "t/*.t"
}
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/README b/perl-package/AI-MXNet-Gluon-Contrib/README
index 1481c3e66c1a..6c0efcc3c897 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/README
+++ b/perl-package/AI-MXNet-Gluon-Contrib/README
@@ -1,5 +1,5 @@
This archive contains the distribution AI-MXNet-Gluon-Contrib,
-version 1.3:
+version 1.32:
Perl interface to MXNet Gluon Contib modules, a collection of supplemental Gluon blocks.
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib.pm b/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib.pm
index f88fb8a7b595..029bc4b65a68 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib.pm
+++ b/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib.pm
@@ -15,10 +15,15 @@
# specific language governing permissions and limitations
# under the License.
+package AI::MXNet::Gluon::Contrib;
use strict;
use warnings;
use AI::MXNet;
use AI::MXNet::Gluon::Contrib::NN::BasicLayers;
-our $VERSION = '1.3';
+our $VERSION = '1.32';
+=head1 NAME
+
+ AI::MXNet::Gluon::Contrib - A collection of supplemental Gluon blocks.
+=cut
1;
\ No newline at end of file
diff --git a/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib/NN/BasicLayers.pm b/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib/NN/BasicLayers.pm
index 455284e30483..5f57e031032c 100644
--- a/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib/NN/BasicLayers.pm
+++ b/perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib/NN/BasicLayers.pm
@@ -17,6 +17,13 @@
use strict;
use warnings;
+package AI::MXNet::Gluon::Contrib::NN::BasicLayers;
+
+=head1 NAME
+
+ AI::MXNet::Gluon::Contrib::NN::BasicLayers - An additional collection of Gluon's building blocks.
+=cut
+
use AI::MXNet::Function::Parameters;
package AI::MXNet::Gluon::NN::Concurrent;
use AI::MXNet::Gluon::Mouse;
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/Changes b/perl-package/AI-MXNet-Gluon-ModelZoo/Changes
index c233f92458db..377dff5be8de 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/Changes
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/Changes
@@ -1,5 +1,8 @@
Revision history for Perl extension AI::MXNet::Gluon::ModelZoo
+1.32 Sun Aug 5 14:25:31 PDT 2018
+ - Updated vgg16/19 models
+
1.3 Tue Jul 10 21:19:13 PDT 2018
- Initial release
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/META.json b/perl-package/AI-MXNet-Gluon-ModelZoo/META.json
index c0e1ad3af8ae..9ea969e9f5fb 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/META.json
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/META.json
@@ -39,5 +39,5 @@
}
},
"release_status" : "stable",
- "version" : "1.3"
+ "version" : "1.32"
}
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/META.yml b/perl-package/AI-MXNet-Gluon-ModelZoo/META.yml
index 2493af60bbec..a04484a898a9 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/META.yml
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/META.yml
@@ -19,4 +19,4 @@ no_index:
requires:
AI::MXNet: '1.31'
AI::MXNet::Gluon::Contrib: '1.3'
-version: '1.3'
+version: '1.32'
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/Makefile.PL b/perl-package/AI-MXNet-Gluon-ModelZoo/Makefile.PL
index 8427aef3dbc4..d15dfce99b8e 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/Makefile.PL
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/Makefile.PL
@@ -40,7 +40,7 @@ my %WriteMakefileArgs = (
"AI::MXNet::Gluon::Contrib" => "1.3"
},
"TEST_REQUIRES" => {},
- "VERSION" => "1.3",
+ "VERSION" => "1.32",
"test" => {
"TESTS" => "t/*.t"
}
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/README b/perl-package/AI-MXNet-Gluon-ModelZoo/README
index d6d697292dbe..6b8e04b971ec 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/README
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/README
@@ -1,5 +1,5 @@
This archive contains the distribution AI-MXNet-Gluon-ModelZoo,
-version 1.3:
+version 1.32:
Perl interface to MXNet Gluon ModelZoo, a collection of pretrained machine learning models for computer vision.
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo.pm b/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo.pm
index 64ccd4601cf4..c9e6e7753045 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo.pm
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo.pm
@@ -26,7 +26,7 @@ use AI::MXNet::Gluon::ModelZoo::Vision;
use Exporter;
use base qw(Exporter);
@AI::MXNet::Gluon::ModelZoo::EXPORT_OK = qw(get_model);
-our $VERSION = '1.3';
+our $VERSION = '1.32';
=head1 NAME
diff --git a/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo/ModelStore.pm b/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo/ModelStore.pm
index 9269ee735663..bb258b4d9cdf 100644
--- a/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo/ModelStore.pm
+++ b/perl-package/AI-MXNet-Gluon-ModelZoo/lib/AI/MXNet/Gluon/ModelZoo/ModelStore.pm
@@ -60,10 +60,10 @@ my %_model_sha1 = map { $_->[1] => $_->[0] } (
['ee79a8098a91fbe05b7a973fed2017a6117723a8', 'vgg11_bn'],
['6bc5de58a05a5e2e7f493e2d75a580d83efde38c', 'vgg13'],
['7d97a06c3c7a1aecc88b6e7385c2b373a249e95e', 'vgg13_bn'],
- ['649467530119c0f78c4859999e264e7bf14471a9', 'vgg16'],
- ['6b9dbe6194e5bfed30fd7a7c9a71f7e5a276cb14', 'vgg16_bn'],
- ['f713436691eee9a20d70a145ce0d53ed24bf7399', 'vgg19'],
- ['9730961c9cea43fd7eeefb00d792e386c45847d6', 'vgg19_bn']
+ ['e660d4569ccb679ec68f1fd3cce07a387252a90a', 'vgg16'],
+ ['7f01cf050d357127a73826045c245041b0df7363', 'vgg16_bn'],
+ ['ad2f660d101905472b83590b59708b71ea22b2e5', 'vgg19'],
+ ['f360b758e856f1074a85abd5fd873ed1d98297c3', 'vgg19_bn']
);
my $apache_repo_url = 'http://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/';
diff --git a/perl-package/AI-MXNet/Changes b/perl-package/AI-MXNet/Changes
index b522759529a0..8b9463ee84e8 100644
--- a/perl-package/AI-MXNet/Changes
+++ b/perl-package/AI-MXNet/Changes
@@ -1,4 +1,10 @@
Revision history for Perl extension AI::MXNet
+
+1.32 Sun Aug 5 14:25:31 PDT 2018
+ - Several new metric classes
+ - Expanded documentation
+ - Bugfixes.
+
1.31 Tue Jul 10 21:19:13 PDT 2018
- Memory leak fix for Gluon API
- Added summary function for Gluon models
diff --git a/perl-package/AI-MXNet/META.json b/perl-package/AI-MXNet/META.json
index a43f77d3662a..7d0ab96c0593 100644
--- a/perl-package/AI-MXNet/META.json
+++ b/perl-package/AI-MXNet/META.json
@@ -30,7 +30,7 @@
},
"runtime" : {
"requires" : {
- "AI::MXNetCAPI" : "1.3",
+ "AI::MXNetCAPI" : "1.32",
"AI::NNVMCAPI" : "1.3",
"Function::Parameters" : "1.0705",
"Hash::Ordered" : "0.012",
@@ -45,5 +45,5 @@
}
},
"release_status" : "stable",
- "version" : "1.31"
+ "version" : "1.32"
}
diff --git a/perl-package/AI-MXNet/META.yml b/perl-package/AI-MXNet/META.yml
index 642f370ee81f..ee5d677a8139 100644
--- a/perl-package/AI-MXNet/META.yml
+++ b/perl-package/AI-MXNet/META.yml
@@ -17,7 +17,7 @@ no_index:
- t
- inc
requires:
- AI::MXNetCAPI: '1.3'
+ AI::MXNetCAPI: '1.32'
AI::NNVMCAPI: '1.3'
Function::Parameters: '1.0705'
Hash::Ordered: '0.012'
@@ -25,4 +25,4 @@ requires:
Mouse: v2.1.0
PDL: '2.007'
PDL::CCS: '1.23.4'
-version: '1.31'
+version: '1.32'
diff --git a/perl-package/AI-MXNet/Makefile.PL b/perl-package/AI-MXNet/Makefile.PL
index f8f0d9c63fe0..59036d905f82 100644
--- a/perl-package/AI-MXNet/Makefile.PL
+++ b/perl-package/AI-MXNet/Makefile.PL
@@ -46,7 +46,7 @@ my %WriteMakefileArgs = (
"GraphViz" => "2.14"
},
"TEST_REQUIRES" => {},
- "VERSION" => "1.31",
+ "VERSION" => "1.32",
"test" => {
"TESTS" => "t/*.t"
}
diff --git a/perl-package/AI-MXNet/README b/perl-package/AI-MXNet/README
index e34970a79723..2f1010a43f9a 100644
--- a/perl-package/AI-MXNet/README
+++ b/perl-package/AI-MXNet/README
@@ -1,5 +1,5 @@
This archive contains the distribution AI-MXNet,
-version 1.31:
+version 1.32:
Perl interface to MXNet machine learning library
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet.pm b/perl-package/AI-MXNet/lib/AI/MXNet.pm
index 4e40fd7298b2..651ca92ad69a 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet.pm
@@ -51,7 +51,7 @@ use AI::MXNet::Gluon;
use AI::MXNet::NDArray::Sparse;
use AI::MXNet::Symbol::Sparse;
use AI::MXNet::Engine;
-our $VERSION = '1.31';
+our $VERSION = '1.32';
sub import
{
@@ -132,7 +132,7 @@ AI::MXNet - Perl interface to MXNet machine learning library
## Convolutional NN for recognizing hand-written digits in MNIST dataset
## It's considered "Hello, World" for Neural Networks
- ## For more info about the MNIST problem please refer to http://neuralnetworksanddeeplearning.com/chap1.html
+ ## For more info about the MNIST problem please refer to L<http://neuralnetworksanddeeplearning.com/chap1.html>
use strict;
use warnings;
@@ -187,9 +187,104 @@ AI::MXNet - Perl interface to MXNet machine learning library
my $res = $mod->score($val_dataiter, mx->metric->create('acc'));
ok($res->{accuracy} > 0.8);
+ ## Gluon MNIST example
+
+ my $net = nn->Sequential();
+ $net->name_scope(sub {
+ $net->add(nn->Dense(128, activation=>'relu'));
+ $net->add(nn->Dense(64, activation=>'relu'));
+ $net->add(nn->Dense(10));
+ });
+ $net->hybridize;
+
+ # data
+ sub transformer
+ {
+ my ($data, $label) = @_;
+ $data = $data->reshape([-1])->astype('float32')/255;
+ return ($data, $label);
+ }
+ my $train_data = gluon->data->DataLoader(
+ gluon->data->vision->MNIST('./data', train=>1, transform => \&transformer),
+ batch_size=>$batch_size, shuffle=>1, last_batch=>'discard'
+ );
+
+ ## training
+ sub train
+ {
+ my ($epochs, $ctx) = @_;
+ # Collect all parameters from net and its children, then initialize them.
+ $net->initialize(mx->init->Xavier(magnitude=>2.24), ctx=>$ctx);
+ # Trainer is for updating parameters with gradient.
+ my $trainer = gluon->Trainer($net->collect_params(), 'sgd', { learning_rate => $lr, momentum => $momentum });
+ my $metric = mx->metric->Accuracy();
+ my $loss = gluon->loss->SoftmaxCrossEntropyLoss();
+
+ for my $epoch (0..$epochs-1)
+ {
+ # reset data iterator and metric at beginning of epoch.
+ $metric->reset();
+ enumerate(sub {
+ my ($i, $d) = @_;
+ my ($data, $label) = @$d;
+ $data = $data->as_in_context($ctx);
+ $label = $label->as_in_context($ctx);
+ # Start recording computation graph with record() section.
+ # Recorded graphs can then be differentiated with backward.
+ my $output;
+ autograd->record(sub {
+ $output = $net->($data);
+ my $L = $loss->($output, $label);
+ $L->backward;
+ });
+ # take a gradient step with batch_size equal to data.shape[0]
+ $trainer->step($data->shape->[0]);
+ # update metric at last.
+ $metric->update([$label], [$output]);
+
+ if($i % $log_interval == 0 and $i > 0)
+ {
+ my ($name, $acc) = $metric->get();
+ print "[Epoch $epoch Batch $i] Training: $name=$acc\n";
+ }
+ }, \@{ $train_data });
+
+ my ($name, $acc) = $metric->get();
+ print "[Epoch $epoch] Training: $name=$acc\n";
+
+ my ($val_name, $val_acc) = test($ctx);
+ print "[Epoch $epoch] Validation: $val_name=$val_acc\n"
+ }
+ $net->save_parameters('mnist.params');
+ }
+
+ train($epochs, $cuda ? mx->gpu(0) : mx->cpu);
+
=head1 DESCRIPTION
Perl interface to MXNet machine learning library.
+ MXNet supports the Perl programming language.
+ The MXNet Perl package brings flexible and efficient GPU computing and
+ state-of-the-art deep learning to Perl.
+ It enables you to write seamless tensor/matrix computation with multiple GPUs in Perl.
+ It also lets you construct and customize the state-of-the-art deep learning models in Perl,
+ and apply them to tasks, such as image classification and data science challenges.
+
+ One important thing to internalize is that the Perl interface is written to be as close as possible to the Python API,
+ so most, if not all of Python’s documentation and examples should just work in Perl after making few changes
+ in order to make the code a bit more Perlish. In a nutshell just add $ sigils and replace . = \n with -> => ;
+ and in 99% of cases that’s all that is needed there.
+ In addition please refer to very detailed L<MXNet Python API Documentation|http://mxnet.incubator.apache.org/api/python/index.html>.
+
+ AI::MXNet supports new imperative PyTorch like Gluon MXNet interface.
+ Please get acquainted with this new interface at L<Deep Learning - The Straight Dope|http://gluon.mxnet.io/>.
+
+ For specific Perl Gluon usage please refer to Perl examples and tests directories on github,
+ but be assured that the Python and Perl usage are extremely close in order to make the use
+ of the Python Gluon docs and examples as easy as possible.
+
+ AI::MXNet is seamlessly glued with L<PDL>, the C++ level state can be easily initialized from PDL
+ and the results can be transferred to PDL objects in order to allow you to use all the glory and power of the PDL!
=head1 BUGS AND INCOMPATIBILITIES
@@ -198,9 +293,9 @@ AI::MXNet - Perl interface to MXNet machine learning library
=head1 SEE ALSO
- http://mxnet.io/
- https://github.com/dmlc/mxnet/tree/master/perl-package
- Function::Parameters, Mouse
+ L<http://mxnet.io/>
+ L<https://github.com/dmlc/mxnet/tree/master/perl-package>
+ L<Function::Parameters>, L<Mouse>
=head1 AUTHOR
@@ -208,6 +303,6 @@ AI::MXNet - Perl interface to MXNet machine learning library
=head1 COPYRIGHT & LICENSE
- This library is licensed under Apache 2.0 license https://www.apache.org/licenses/LICENSE-2.0
+ This library is licensed under Apache 2.0 license L<https://www.apache.org/licenses/LICENSE-2.0>
=cut
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/AutoGrad.pm b/perl-package/AI-MXNet/lib/AI/MXNet/AutoGrad.pm
index 160cd968f95b..c1e5f06e12bd 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/AutoGrad.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/AutoGrad.pm
@@ -46,16 +46,38 @@ EOP
AI::MXNet::AutoGrad - Autograd for NDArray.
=cut
+=head1 DESCRIPTION
+
+ Auto gradients differentiation for dynamic graphs, primarily used with Gluon.
+
+=cut
+
+=head1 SYNOPSIS
+
+ use AI::MXNet qw(mx);
+ my $x = mx->nd->ones([1]);
+ $x->attach_grad;
+ my $z;
+ mx->autograd->record(sub {
+ $z = mx->nd->elemwise_add($x->exp, $x);
+ });
+ my $dx = mx->autograd->grad($z, $x, create_graph=>1);
+ ok(abs($dx->asscalar - 3.71828175) < 1e-7);
+ $dx->backward;
+ ok(abs($x->grad->asscalar - 2.71828175) < 1e-7);
+
+=cut
+
=head2 set_is_training
Set status to training/not training. When training, graph will be constructed
- for gradient computation. Operators will also run with ctx.is_train=True. For example,
+ for gradient computation. Operators will also run with $is_train=1. For example,
Dropout will drop inputs randomly when is_train=True while simply passing through
- if is_train=False.
+ if $is_train=0.
Parameters
----------
- is_train: bool
+ $is_train: Bool
Returns
-------
@@ -75,7 +97,7 @@ method set_is_training(Bool $is_train)
Parameters
----------
- is_recoding: bool
+ $is_recording: Bool
Returns
-------
@@ -163,9 +185,9 @@ method mark_variables(
Output NDArray(s)
:$head_grads=: Maybe[AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray|Undef]]
Gradients with respect to heads.
- :$retain_graph=0: bool, optional
+ :$retain_graph=0: Bool, optional
Whether to retain graph.
- :$train_mode=1: bool, optional
+ :$train_mode=1: Bool, optional
Whether to do backward for training or predicting.
=cut
method backward(
@@ -196,11 +218,11 @@ method backward(
Parameters
----------
- outputs: array ref of NDArray
+ outputs: ArrayRef[AI::MXNet::NDArray]
Returns
-------
- gradients: array ref of NDArray
+ gradients: ArrayRef[AI::MXNet::NDArray]
=cut
@@ -215,14 +237,14 @@ method compute_gradient(ArrayRef[AI::MXNet::NDArray] $outputs)
Parameters
----------
- func: a perl sub
+ $func: CodeRef
The forward (loss) function.
- argnum: an int or a array ref of int
+ $argnum: Maybe[Int|ArrayRef[Int]]
The index of argument to calculate gradient for.
Returns
-------
- grad_and_loss_func: a perl sub
+ grad_and_loss_func: CodeRef
A function that would compute both the gradient of arguments and loss value.
=cut
@@ -256,29 +278,29 @@ method grad_and_loss(CodeRef $func, Maybe[Int|ArrayRef[Int]] $argnum=)
returned as new NDArrays instead of stored into `variable.grad`.
Supports recording gradient graph for computing higher order gradients.
- .. Note: Currently only a very limited set of operators support higher order
+ Note: Currently only a very limited set of operators support higher order
gradients.
Parameters
----------
- $heads: NDArray or array ref of NDArray
+ $heads: AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]
Output NDArray(s)
- $variables: NDArray or list of NDArray
+ $variables: AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]
Input variables to compute gradients for.
- :$head_grads=: NDArray or list of NDArray or undef
+ :$head_grads=: Maybe[AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray|Undef]]
Gradients with respect to heads.
- :$retain_graph=: bool
+ :$retain_graph=: Bool
Whether to keep computation graph to differentiate again, instead
of clearing history and release memory. Defaults to the same value
as create_graph.
- :$create_graph=0: bool
- Whether to record gradient graph for computing higher order
- $train_mode=1: bool, optional
+ :$create_graph=0: Bool
+ Whether to record gradient graph for computing of higher order gradients.
+ $train_mode=1: Bool, optional
Whether to do backward for training or prediction.
Returns
-------
- NDArray or list of NDArray:
+ AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]:
Gradients with respect to variables.
Examples
@@ -349,7 +371,7 @@ method grad(
Executes $sub within an autograd training scope context.
Parameters
----------
- CodeRef $sub: a perl sub
+ $sub: CodeRef
=cut
method train_mode(CodeRef $sub)
@@ -365,7 +387,7 @@ method train_mode(CodeRef $sub)
Executes $sub within an autograd predicting scope context.
Parameters
----------
- CodeRef $sub: a perl sub
+ $sub: CodeRef
=cut
method predict_mode(CodeRef $sub)
@@ -382,8 +404,8 @@ method predict_mode(CodeRef $sub)
and captures code that needs gradients to be calculated.
Parameters
----------
- CodeRef $sub: a perl sub
- Maybe[Bool] :$train_mode=1
+ $sub: CodeRef
+ :$train_mode=1 : Maybe[Bool]
=cut
method record(CodeRef $sub, Maybe[Bool] :$train_mode=1)
@@ -409,8 +431,8 @@ method record(CodeRef $sub, Maybe[Bool] :$train_mode=1)
and captures code that needs gradients to be calculated.
Parameters
----------
- CodeRef $sub: a perl sub
- Maybe[Bool] :$train_mode=0
+ $sub: CodeRef
+ :$train_mode=0 : Maybe[Bool]
=cut
method pause(CodeRef $sub, Maybe[Bool] :$train_mode=0)
@@ -436,11 +458,11 @@ method pause(CodeRef $sub, Maybe[Bool] :$train_mode=0)
Parameters
----------
- x : NDArray
- Array representing the head of computation graph.
+ $x : AI::MXNet::NDArray
+ AI::MXNet::NDArray representing the head of computation graph.
Returns
-------
- Symbol
+ AI::MXNet::Symbol
The retrieved Symbol.
=cut
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Base.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Base.pm
index f7daea2a7871..3f6bd8341325 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Base.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Base.pm
@@ -21,7 +21,7 @@ use warnings;
use PDL;
use PDL::Types ();
use PDL::CCS::Nd;
-use AI::MXNetCAPI 1.3;
+use AI::MXNetCAPI 1.32;
use AI::NNVMCAPI 1.3;
use AI::MXNet::Types;
use Time::HiRes;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/CachedOp.pm b/perl-package/AI-MXNet/lib/AI/MXNet/CachedOp.pm
index 27ec6dc0d2a5..7e73ded8ad07 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/CachedOp.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/CachedOp.pm
@@ -22,6 +22,11 @@ package AI::MXNet::CachedOp;
AI::MXNet::CachedOp - A wrapper around CachedOpHandle
=cut
+=head1 DESCRIPTION
+
+ Internal module, used as a part of AI::MXNet::Gluon::HybridBlock.
+=cut
+
use strict;
use warnings;
use AI::MXNet::Base;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Callback.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Callback.pm
index da3309700394..b2a0b2948154 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Callback.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Callback.pm
@@ -25,7 +25,38 @@ use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } };
=head1 NAME
- AI::MXNet::Callback - A collection of predefined callback functions
+ AI::MXNet::Callback - A collection of predefined callback functions.
+=cut
+
+=head1 DESCRIPTION
+
+ A collection of predefined callback functions, mainly to be used in AI::MXNet::Module::Base::fit.
+=cut
+
+=head1 SYNOPSIS
+
+ my $model = mx->mod->Module(
+ symbol => $net,
+ context => $contexts
+ );
+ $model->fit(
+ $data_iter,
+ eval_metric => mx->metric->Perplexity,
+ kvstore => $kv_store,
+ optimizer => $optimizer,
+ optimizer_params => {
+ learning_rate => $lr,
+ momentum => $mom,
+ wd => $wd,
+ clip_gradient => 5,
+ rescale_grad => 1/$batch_size,
+ lr_scheduler => AI::MXNet::FactorScheduler->new(step => 1000, factor => 0.99)
+ },
+ initializer => mx->init->Xavier(factor_type => "in", magnitude => 2.34),
+ num_epoch => $num_epoch,
+ batch_end_callback => mx->callback->Speedometer($batch_size, $disp_batches),
+ ($chkp_epoch ? (epoch_end_callback => [mx->callback->module_checkpoint($model, $chkp_prefix, $chkp_epoch), \&sample]) : ())
+ );
=cut
=head2 module_checkpoint
@@ -36,9 +67,9 @@ use overload "&{}" => sub { my $self = shift; sub { $self->call(@_) } };
----------
$mod : subclass of AI::MXNet::Module::Base
The module to checkpoint.
- $prefix : str
+ $prefix : Str
The file prefix to checkpoint to
- $period=1 : int
+ $period=1 : Int
How many epochs to wait before checkpointing. Default is 1.
$save_optimizer_states=0 : Bool
Whether to save optimizer states for later training.
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Context.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Context.pm
index e116e6e7a8d1..826e7baf905b 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Context.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Context.pm
@@ -78,6 +78,13 @@ use overload
This class governs the device context of AI::MXNet::NDArray objects.
=cut
+=head1 SYNOPSIS
+
+ use AI::MXNet qw(mx);
+ print nd->array([[1,2],[3,4]], ctx => mx->cpu)->aspdl;
+ my $arr_gpu = nd->random->uniform(shape => [10, 10], ctx => mx->gpu(0));
+=cut
+
=head2
Constructing a context.
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib.pm
index 9f6a0ab01600..c470acab60e7 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib.pm
@@ -21,6 +21,32 @@ use warnings;
use AI::MXNet::Contrib::Symbol;
use AI::MXNet::Contrib::NDArray;
+=head1 NAME
+
+ AI::MXNet::Contrib - An interface to experimental operators defined in C++ space.
+=cut
+
+=head1 SYNOPSIS
+
+ my $embed;
+ if($sparse_embedding)
+ {
+ my $embed_weight = mx->sym->Variable('embed_weight', stype=>'row_sparse');
+ $embed = mx->sym->contrib->SparseEmbedding(
+ data=>$data, input_dim=>$num_words,
+ weight=>$embed_weight, output_dim=>$num_embed,
+ name=>'embed'
+ );
+ }
+ else
+ {
+ $embed = mx->sym->Embedding(
+ data=>$data, input_dim=>$num_words,
+ output_dim=>$num_embed, name=>'embed'
+ );
+ }
+=cut
+
sub sym { 'AI::MXNet::Contrib::Symbol' }
sub symbol { 'AI::MXNet::Contrib::Symbol' }
sub nd { 'AI::MXNet::Contrib::NDArray' }
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/NDArray.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/NDArray.pm
index 0c1547e990a5..574ecc443f79 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/NDArray.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/NDArray.pm
@@ -21,4 +21,14 @@ use warnings;
use parent 'AI::MXNet::AutoLoad';
sub config { ('contrib', 'AI::MXNet::NDArray') }
+=head1 NAME
+
+ AI::MXNet::Contrib::NDArray - An interface to experimental NDArray operators defined in C++ space.
+=cut
+
+=head1 SYNOPSIS
+
+ mx->contrib->ndarray->fft(nd->random->normal(0, 1, [3, 4], ctx => mx->gpu));
+=cut
+
1;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/Symbol.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/Symbol.pm
index d84f831c0fa6..d5a041a085fe 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/Symbol.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Contrib/Symbol.pm
@@ -21,4 +21,30 @@ use warnings;
use parent 'AI::MXNet::AutoLoad';
sub config { ('contrib', 'AI::MXNet::Symbol') }
+=head1 NAME
+
+ AI::MXNet::Contrib - An interface to experimental symbol operators defined in C++ space.
+=cut
+
+=head1 SYNOPSIS
+
+ my $embed;
+ if($sparse_embedding)
+ {
+ my $embed_weight = mx->sym->Variable('embed_weight', stype=>'row_sparse');
+ $embed = mx->sym->contrib->SparseEmbedding(
+ data=>$data, input_dim=>$num_words,
+ weight=>$embed_weight, output_dim=>$num_embed,
+ name=>'embed'
+ );
+ }
+ else
+ {
+ $embed = mx->sym->Embedding(
+ data=>$data, input_dim=>$num_words,
+ output_dim=>$num_embed, name=>'embed'
+ );
+ }
+=cut
+
1;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/CudaModule.pm b/perl-package/AI-MXNet/lib/AI/MXNet/CudaModule.pm
index 5fa66b26472e..b3272fe8b048 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/CudaModule.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/CudaModule.pm
@@ -34,6 +34,11 @@ our %DTYPE_CPP_TO_STR = qw(
int64_t int64
);
+=head1 NAME
+
+ AI::MXNet::CudaModule - Interface to runtime cuda kernel compile module.
+=cut
+
=head1 DESCRIPTION
Interface to runtime cuda kernel compile module.
@@ -81,12 +86,12 @@ our %DTYPE_CPP_TO_STR = qw(
Parameters
----------
- source : str
+ source : Str
Complete source code.
- options : array ref of str
+ options : Str|ArrayRef[Str]
Compiler flags. For example, use "-I/usr/local/cuda/include" to
add cuda headers to include path.
- exports : array ref of str
+ exports : Str|ArrayRef[Str]
Export kernel names.
=cut
@@ -124,9 +129,9 @@ sub DEMOLISH
Parameters
----------
- name : str
+ $name : Str
String name of the kernel.
- signature : str
+ $signature : Str
Function signature for the kernel. For example, if a kernel is
declared as::
@@ -196,7 +201,7 @@ use AI::MXNet::Base;
=head1 NAME
- AI::MXNet::CudaKernel
+ AI::MXNet::CudaKernel - Constructs CUDA kernel.
=cut
=head1 DESCRIPTION
@@ -228,15 +233,15 @@ sub DEMOLISH
Parameters
----------
- $args : array ref of NDArray or numbers
+ $args : ArrayRef[AI::MXNet::NDArray|Num]
List of arguments for kernel. NDArrays are expected for pointer
types (e.g. `float*`, `double*`) while numbers are expected for
non-pointer types (e.g. `int`, `float`).
$ctx : AI::MXNet::Context
The context to launch kernel on. Must be GPU context.
- $grid_dims : array ref of 3 integers
+ $grid_dims : array ref of 3 integers (CudaKernelShape)
Grid dimensions for CUDA kernel.
- $block_dims : array ref of 3 integers
+ $block_dims : array ref of 3 integers (CudaKernelShape)
Block dimensions for CUDA kernel.
$shared_mem=0 : integer, optional
Size of dynamically allocated shared memory. Defaults to 0.
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Engine.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Engine.pm
index c4ee262dfc96..1d73e5584268 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Engine.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Engine.pm
@@ -20,9 +20,28 @@ use strict;
use warnings;
use AI::MXNet::Function::Parameters;
use AI::MXNet::Base;
+
=head1 NAME
- AI::MXNet::Engine - Engine properties management.
+ AI::MXNet::Engine - Allows management of properties of the MXNet's engine.
+=cut
+
+=head1 SYNOPSIS
+
+ my $x;
+ mx->engine->bulk(10, sub {
+ $x = mx->nd->ones([10]);
+ $x *= 2;
+ $x += 1;
+ $x->wait_to_read();
+ $x += 1;
+ ok(($x->aspdl == 4)->all);
+ for my $i (1..100)
+ {
+ $x += 1;
+ }
+ });
+ ok(($x->aspdl == 104)->all);
=cut
=head2 set_bulk_size
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Executor.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Executor.pm
index edcaabea1f47..573abbf588f2 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Executor.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Executor.pm
@@ -32,7 +32,6 @@ has '_symbol' => (is => 'rw', init_arg => 'symbol', isa => 'AI::MXN
has '_ctx' => (is => 'rw', init_arg => 'ctx', isa => 'AI::MXNet::Context' );
has '_grad_req' => (is => 'rw', init_arg => 'grad_req', isa => 'Maybe[Str|ArrayRef[Str]|HashRef[Str]]');
has '_group2ctx' => (is => 'rw', init_arg => 'group2ctx', isa => 'Maybe[HashRef[AI::MXNet::Context]]');
-has '_monitor_callback' => (is => 'rw', isa => 'CodeRef');
has [qw/_arg_dict
_grad_dict
_aux_dict
@@ -42,6 +41,18 @@ has [qw/_arg_dict
=head1 NAME
AI::MXNet::Executor - The actual executing object of MXNet.
+=cut
+
+=head1 SYNOPSIS
+
+ my $executor = $sym->bind(
+ ctx => mx->Context('cpu'),
+ args => [$lhs_arr, $rhs_arr],
+ args_grad => [$lhs_grad, $rhs_grad]
+ );
+ $executor->forward(1);
+ print $executor->outputs->[0]->aspdl;
+=cut
=head2 new
@@ -138,7 +149,7 @@ method _get_outputs()
Parameters
----------
- $is_train=0: bool, optional
+ $is_train=0: Bool, optional
whether this forward is for evaluation purpose. If True,
a backward call is expected to follow. Otherwise following
backward is invalid.
@@ -200,12 +211,12 @@ method forward(Int $is_train=0, %kwargs)
Parameters
----------
- out_grads : NDArray or an array ref of NDArrays or hash ref of NDArrays, optional.
+ $out_grads : NDArray or an array ref of NDArrays or hash ref of NDArrays, optional.
The gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
- is_train : bool, default 1
+ $is_train : Bool, default 1
Whether this backward is for training or inference. Note that in rare
cases you want to call backward with is_train=0 to get gradient
during inference.
@@ -241,17 +252,16 @@ method backward(
Parameters
----------
- callback : subref
+ $callback : CodeRef
Takes a string and an NDArrayHandle.
=cut
method set_monitor_callback(CodeRef $callback)
{
- $self->_monitor_callback($callback);
check_call(
AI::MXNetCAPI::ExecutorSetMonitorCallback(
$self->handle,
- $self->_monitor_callback
+ $callback
)
);
}
@@ -262,7 +272,7 @@ method set_monitor_callback(CodeRef $callback)
Returns
-------
- arg_dict : HashRef[AI::MXNet::NDArray]
+ $arg_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the arguments to the NDArrays.
=cut
@@ -285,7 +295,7 @@ method arg_dict()
Returns
-------
- grad_dict : HashRef[AI::MXNet::NDArray]
+ $grad_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the arguments to the gradient NDArrays.
=cut
@@ -308,7 +318,7 @@ method grad_dict()
Returns
-------
- aux_dict : HashRef[AI::MXNet::NDArray]
+ $aux_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the auxiliary states to the NDArrays.
=cut
@@ -331,7 +341,7 @@ method aux_dict()
Returns
-------
- output_dict : HashRef[AI::MXNet::NDArray]
+ $output_dict : HashRef[AI::MXNet::NDArray]
The map that maps a name of the outputs to the NDArrays.
=cut
@@ -354,13 +364,13 @@ method output_dict()
Parameters
----------
- arg_params : HashRef[AI::MXNet::NDArray]
+ $arg_params : HashRef[AI::MXNet::NDArray]
Parameters, hash ref of name to NDArray of arguments
- aux_params : Maybe[HashRef[AI::MXNet::NDArray]], optional
+ $aux_params= : Maybe[HashRef[AI::MXNet::NDArray]], optional
Parameters, hash ref of name to NDArray of auxiliary states.
- allow_extra_params : boolean, optional
+ $allow_extra_params= : Bool, optional
Whether to allow extra parameters that are not needed by symbol
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
@@ -415,9 +425,9 @@ method copy_params_from(
----------
$kwargs : HashRef[Shape]
new shape for arguments.
- :$partial_shaping : bool
+ :$partial_shaping : Bool
Whether to allow changing the shape of unspecified arguments.
- :$allow_up_sizing : bool
+ :$allow_up_sizing : Bool
Whether to allow allocating new ndarrays that's larger than the original.
Returns
@@ -501,7 +511,7 @@ method reshape(HashRef[Shape] $kwargs, Int :$partial_shaping=0, Int :$allow_up_s
Returns
-------
- debug_str : string
+ $debug_str : Str
Debug string of the executor.
=cut
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon.pm
index 7f92378c0823..92c8386c0d14 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon.pm
@@ -57,4 +57,106 @@ sub data { 'AI::MXNet::Gluon::Data' }
sub utils { 'AI::MXNet::Gluon::Utils' }
sub model_zoo { require AI::MXNet::Gluon::ModelZoo; 'AI::MXNet::Gluon::ModelZoo' }
+=head1 NAME
+
+ AI::MXNet::Gluon - High-level interface for MXNet.
+=cut
+
+=head1 DESCRIPTION
+
+ The AI::MXNet::Gluon package is a high-level interface for MXNet designed to be easy to use,
+ while keeping most of the flexibility of a low level API.
+ AI::MXNet::Gluon supports both imperative and symbolic programming,
+ making it easy to train complex models imperatively in Perl.
+
+ Based on the Gluon API specification,
+ the Gluon API in Apache MXNet provides a clear, concise, and simple API for deep learning.
+ It makes it easy to prototype, build, and train deep learning models without sacrificing training speed.
+
+ Advantages.
+
+ Simple, Easy-to-Understand Code: Gluon offers a full set of plug-and-play neural network building blocks,
+ including predefined layers, optimizers, and initializers.
+
+ Flexible, Imperative Structure: Gluon does not require the neural network model to be rigidly defined,
+ but rather brings the training algorithm and model closer together to provide flexibility in the development process.
+
+ Dynamic Graphs: Gluon enables developers to define neural network models that are dynamic,
+ meaning they can be built on the fly, with any structure, and using any of Perl’s native control flow.
+
+ High Performance: Gluon provides all of the above benefits without impacting the training speed that the underlying engine provides.
+
+
+ Simple, Easy-to-Understand Code
+ Use plug-and-play neural network building blocks, including predefined layers, optimizers, and initializers:
+
+ use AI::MXNet qw(mx);
+ use AI::MXNet::Gluon qw(gluon);
+
+ my $net = gluon->nn->Sequential;
+ # When instantiated, Sequential stores a chain of neural network layers.
+ # Once presented with data, Sequential executes each layer in turn, using
+ # the output of one layer as the input for the next
+ $net->name_scope(sub {
+ $net->add(gluon->nn->Dense(256, activation=>"relu")); # 1st layer (256 nodes)
+ $net->add(gluon->nn->Dense(256, activation=>"relu")); # 2nd hidden layer
+ $net->add(gluon->nn->Dense($num_outputs));
+ });
+
+ Flexible, Imperative Structure.
+
+ Prototype, build, and train neural networks in a fully imperative manner using the AI::MXNet package and the Gluon trainer method:
+
+ use AI::MXNet::Base; # provides helpers, such as zip, enumerate, etc.
+ use AI::MXNet::AutoGrad qw(autograd);
+ my $epochs = 10;
+
+ for(1..$epochs)
+ {
+ for(zip($train_data))
+ {
+ my ($data, $label) = @$_;
+ autograd->record(sub {
+ my $output = $net->($data); # the forward iteration
+ my $loss = gluon->loss->softmax_cross_entropy($output, $label);
+ $loss->backward;
+ });
+ $trainer->step($data->shape->[0]); ## batch size
+ }
+ }
+
+ Dynamic Graphs.
+
+ Build neural networks on the fly for use cases where neural networks must change in size and shape during model training:
+
+ use AI::MXNet::Function::Parameters;
+
+ method forward(GluonClass $F, GluonInput $inputs, GluonInput :$tree)
+ {
+ my $children_outputs = [
+ map { $self->forward($F, $inputs, $_) } @{ $tree->children }
+ ];
+ #Recursively builds the neural network based on each input sentence’s
+ #syntactic structure during the model definition and training process
+ ...
+ }
+
+ High Performance
+
+ Easily cache the neural network to achieve high performance by defining your neural network with HybridSequential
+ and calling the hybridize method:
+
+ use AI::MXNet::Gluon::NN qw(nn);
+
+ my $net = nn->HybridSequential;
+ $net->name_scope(sub {
+ $net->add(nn->Dense(256, activation=>"relu"));
+ $net->add(nn->Dense(128, activation=>"relu"));
+ $net->add(nn->Dense(2));
+ });
+
+ $net->hybridize();
+ See more at L
+=cut
+
1;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Block.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Block.pm
index be819ac9d4e5..1b35e7864c12 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Block.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Block.pm
@@ -855,20 +855,20 @@ package AI::MXNet::Gluon::HybridBlock;
=head2 DESCRIPTION
- `HybridBlock` supports forwarding with both Symbol and NDArray.
+ HybridBlock supports forwarding with both Symbol and NDArray.
- Forward computation in `HybridBlock` must be static to work with `Symbol`s,
- i.e. you cannot call `.asnumpy()`, `.shape`, `.dtype`, etc on tensors.
+ Forward computation in HybridBlock must be static to work with Symbols,
+ i.e. you cannot call aspdl, shape, dtype, etc on tensors.
Also, you cannot use branching or loop logic that bases on non-constant
expressions like random numbers or intermediate results, since they change
the graph structure for each iteration.
- Before activating with `hybridize()`, `HybridBlock` works just like normal
- `Block`. After activation, `HybridBlock` will create a symbolic graph
+ Before activating with hybridize(), HybridBlock works just like normal
+ Block. After activation, HybridBlock will create a symbolic graph
representing the forward computation and cache it. On subsequent forwards,
- the cached graph will be used instead of `hybrid_forward`.
+ the cached graph will be used instead of hybrid_forward.
- Refer `Hybrid tutorial `_ to see
+ Refer Hybrid tutorial L to see
the end-to-end usage.
=cut
@@ -1141,7 +1141,7 @@ method _call_cached_op(@args)
=head2 forward
Defines the forward computation. Arguments can be either
- `NDArray` or `Symbol`.
+ NDArray or Symbol
=cut
method forward($x, @args)
@@ -1225,12 +1225,12 @@ method hybrid_forward($F, $x, @args)
or the C++ interface.
When there are only one input, it will have name 'data'. When there
- Are more than one inputs, they will be named as `data0`, `data1`, etc.
+ are more than one input, they will be named as 'data0', 'data1', etc.
Parameters
----------
$path : str
- Path to save model. Two files `path-symbol.json` and `path-xxxx.params`
+ Path to save model. Two files 'path-symbol.json' and 'path-xxxx.params'
will be created, where xxxx is the 4 digits epoch number.
:$epoch=0 : Int
Epoch number of saved model.
@@ -1298,20 +1298,20 @@ extends 'AI::MXNet::Gluon::HybridBlock';
Examples
--------
- >>> # To extract the feature from fc1 and fc2 layers of AlexNet:
- >>> alexnet = gluon.model_zoo.vision.alexnet(pretrained=True, ctx=mx.cpu(),
- prefix='model_')
- >>> inputs = mx.sym.var('data')
- >>> out = alexnet(inputs)
- >>> internals = out.get_internals()
- >>> print(internals.list_outputs())
+ >>> # To extract the feature from fc1 and fc2 layers of AlexNet
+ >>> $alexnet = gluon->model_zoo->vision->alexnet(pretrained=>1, ctx=>mx->cpu(),
+ prefix=>'model_');
+ >>> $inputs = mx->sym->var('data');
+ >>> $out = $alexnet->($inputs);
+ >>> $internals = $out->get_internals()
+ >>> print($internals->list_outputs())
['data', ..., 'model_dense0_relu_fwd_output', ..., 'model_dense1_relu_fwd_output', ...]
- >>> outputs = [internals['model_dense0_relu_fwd_output'],
- internals['model_dense1_relu_fwd_output']]
+ >>> $outputs = [$internals->slice('model_dense0_relu_fwd_output'),
+ $internals->slice('model_dense1_relu_fwd_output')];
>>> # Create SymbolBlock that shares parameters with alexnet
- >>> feat_model = gluon.SymbolBlock(outputs, inputs, params=alexnet.collect_params())
- >>> x = mx.nd.random_normal(shape=(16, 3, 224, 224))
- >>> print(feat_model(x))
+ >>> $feat_model = gluon->SymbolBlock($outputs, $inputs, params=>$alexnet->collect_params());
+ >>> $x = mx->nd->random_normal(shape=>[16, 3, 224, 224]);
+ >>> print($feat_model->($x));
=cut
has [qw/outputs inputs/] => (is => 'rw', isa => 'AI::MXNet::Symbol|ArrayRef[AI::MXNet::Symbol]');
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Parameter.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Parameter.pm
index c39d5d461c2c..475c2a93647e 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Parameter.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Parameter.pm
@@ -934,7 +934,6 @@ use overload
my $content = join("\n", map { AI::MXNet::Base::_indent(" $_", 2) } $self->values);
return "$name(\n$content\n)";
},
- '%{}' => sub { my %tmp = shift->_params->as_list; \%tmp },
'@{}' => sub { my @tmp = shift->_params->as_list; \@tmp },
fallback => 1;
@@ -1316,7 +1315,7 @@ method load(
);
next;
}
- $self->{ $name }->_load_init($arg_dict{$name}, $ctx);
+ $self->_params->get($name)->_load_init($arg_dict{$name}, $ctx);
}
}
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/IO.pm b/perl-package/AI-MXNet/lib/AI/MXNet/IO.pm
index fc3f960cc496..297ceb8c0b24 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/IO.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/IO.pm
@@ -24,7 +24,15 @@ use Scalar::Util qw/blessed/;
=head1 NAME
- AI::MXNet::IO - NDArray interface of mxnet.
+ AI::MXNet::IO - Data loading interface of MXNet
+=cut
+
+=head1 DESCRIPTION
+
+ This document summarizes supported data formats and iterator APIs to read the data including
+ mx->io Data iterators for common data formats.
+ mx->recordio Data iterators for the RecordIO data format.
+ mx->image Image Iterators and image augmentation functions.
=cut
# Convert data into canonical form.
@@ -626,6 +634,21 @@ extends 'AI::MXNet::DataIter';
AI::MXNet::MXDataIter - A data iterator pre-built in C++ layer of MXNet.
=cut
+=head1 DESCRIPTION
+
+ Here are the list of currently available predefined iterators, for more custom iterators
+ please check out the examples directory.
+ Also please refer to the L
+ mx->io->CSVIter Returns the CSV file iterator.
+ mx->io->LibSVMIter Returns the LibSVM iterator which returns data with csr storage type.
+ mx->io->ImageRecordIter Iterates on image RecordIO files
+ mx->io->ImageRecordUInt8Iter Iterating on image RecordIO files
+ mx->io->MNISTIter Iterating on the MNIST dataset.
+ mx->recordio->MXRecordIO Reads/writes RecordIO data format, supporting sequential read and write.
+ mx->recordio->MXIndexedRecordIO Reads/writes RecordIO data format, supporting random access.
+ mx->image->ImageIter Image data iterator with a large number of augmentation choices.
+=cut
+
has 'handle' => (is => 'ro', isa => 'DataIterHandle', required => 1);
has '_debug_skip_load' => (is => 'rw', isa => 'Int', default => 0);
has '_debug_at_begin' => (is => 'rw', isa => 'Int', default => 0);
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Image.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Image.pm
index 4f670b0e8e36..9c7fa120f343 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Image.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Image.pm
@@ -622,18 +622,18 @@ method CastAug()
=cut
method CreateAugmenter(
-Shape :$data_shape,
-Bool :$resize=0,
-Bool :$rand_crop=0,
-Bool :$rand_resize=0,
-Bool :$rand_mirror=0,
-Maybe[Num|PDL] :$mean=,
-Maybe[Num|PDL] :$std=,
-Num :$brightness=0,
-Num :$contrast=0,
-Num :$saturation=0,
-Num :$pca_noise=0,
-Int :$inter_method=2
+ Shape :$data_shape,
+ Bool :$resize=0,
+ Bool :$rand_crop=0,
+ Bool :$rand_resize=0,
+ Bool :$rand_mirror=0,
+ Maybe[Num|PDL] :$mean=,
+ Maybe[Num|PDL] :$std=,
+ Num :$brightness=0,
+ Num :$contrast=0,
+ Num :$saturation=0,
+ Num :$pca_noise=0,
+ Int :$inter_method=2
)
{
my @auglist;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Initializer.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Initializer.pm
index 7c481efcc53f..fe8dce32e2d8 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Initializer.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Initializer.pm
@@ -73,6 +73,24 @@ has '_print_func' => (is => 'rw', isa => 'CodeRef', lazy => 1,
AI::MXNet::Initializer - Base class for all Initializers
+=head1 DESCRIPTION
+
+ The base class AI::MXNet::Initializer defines the default behaviors to initialize various parameters,
+ such as set bias to 1, except for the weight. Other classes then define how to initialize the weights.
+ Currently following classes are available:
+ mx->init->Uniform Initializes weights with random values uniformly sampled from a given range.
+ mx->init->Normal Initializes weights with random values sampled from a normal distribution with a mean of zero and standard deviation of sigma.
+ mx->init->Load Initializes variables by loading data from file or dict.
+ mx->init->Mixed Initialize parameters using multiple initializers.
+ mx->init->Zero Initializes weights to zero.
+ mx->init->One Initializes weights to one.
+ mx->init->Constant Initializes the weights to a given value.
+ mx->init->Orthogonal Initialize weight as orthogonal matrix.
+ mx->init->Xavier Returns an initializer performing “Xavier” initialization for weights.
+ mx->init->MSRAPrelu Initialize the weight according to a MSRA paper.
+ mx->init->Bilinear Initialize weight for upsampling layers.
+ mx->init->FusedRNN Initialize parameters for fused rnn layers.
+
=head2 register
Register an initializer class to the AI::MXNet::Initializer factory.
@@ -372,7 +390,7 @@ method call(Str $name, AI::MXNet::NDArray $arr)
=head1 NAME
- AI::MXNet::Mixed - A container for multiple initializer patterns.
+ AI::MXNet::Mixed - A container with multiple initializer patterns.
=cut
=head2 new
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/KVStore.pm b/perl-package/AI-MXNet/lib/AI/MXNet/KVStore.pm
index de66d91552c2..bb6631f459a9 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/KVStore.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/KVStore.pm
@@ -37,7 +37,6 @@ use AI::MXNet::Function::Parameters;
has 'handle' => (is => 'ro', isa => 'KVStoreHandle', required => 1);
has '_updater' => (is => 'rw', isa => 'AI::MXNet::Updater');
-has '_updater_func' => (is => 'rw', isa => 'CodeRef');
sub DEMOLISH
{
@@ -53,9 +52,9 @@ sub DEMOLISH
Parameters
----------
- key : str or an array ref of str
+ $key : Str|ArrayRef[Str]
The keys.
- value : NDArray or an array ref of NDArray objects
+ $value : AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]]
The values.
Examples
@@ -100,9 +99,9 @@ method init(
Parameters
----------
- key : str or array ref of str
- value : NDArray or array ref of NDArray or array ref of array refs of NDArray
- priority : int, optional
+ $key : Str|ArrayRef[Str]
+ $value : AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]]
+ :$priority=0 : Int, optional
The priority of the push operation.
The higher the priority, the faster this action is likely
to be executed before other push actions.
@@ -171,12 +170,12 @@ method push(
Parameters
----------
- key : str or array ref of str
+ $key : Str|ArrayRef[Str]
Keys
- out: NDArray or array ref of NDArray or array ref of array refs of NDArray
+ :$out: AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]]
According values
- priority : int, optional
+ :$priority=0 : Int, optional
The priority of the push operation.
The higher the priority, the faster this action is likely
to be executed before other push actions.
@@ -241,18 +240,18 @@ method pull(
Parameters
----------
- key : str, int, or sequence of str or int
+ $key : Str|ArrayRef[Str]
Keys.
- out: AI::MXNet::NDArray::RowSparse or array ref of AI::MXNet::NDArray::RowSparse or array ref of array ref of AI::MXNet::NDArray::RowSparse
+ :$out: AI::MXNet::NDArray::RowSparse|ArrayRef[AI::MXNet::NDArray::RowSparse]|ArrayRef[ArrayRef[AI::MXNet::NDArray::RowSparse]]
Values corresponding to the keys. The stype is expected to be row_sparse
- priority : int, optional
+ :$priority=0 : Int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
- row_ids : AI::MXNet::NDArray or array ref of AI::MXNet::NDArray
+ :$row_ids : AI::MXNet::NDArray|ArrayRef[AI::MXNet::NDArray]|ArrayRef[ArrayRef[AI::MXNet::NDArray]]
The row_ids for which to pull for each value. Each row_id is an 1D NDArray
whose values don't have to be unique nor sorted.
@@ -364,7 +363,7 @@ method row_sparse_pull(
Parameters
----------
- compression_params : HashRef
+ $compression_params : HashRef[Str]
A dictionary specifying the type and parameters for gradient compression.
The key `type` in this dictionary is a
required string argument and specifies the type of gradient compression.
@@ -401,7 +400,7 @@ method set_gradient_compression(HashRef[Str] $compression_params)
Parameters
----------
- optimizer : Optimizer
+ $optimizer : AI::MXNet::Optimizer
the optimizer
=cut
@@ -426,7 +425,7 @@ method set_optimizer(AI::MXNet::Optimizer $optimizer)
Returns
-------
- type : str
+ $type : Str
the string type
=cut
@@ -441,7 +440,7 @@ method type()
Returns
-------
- rank : int
+ $rank : Int
The rank of this node, which is in [0, get_num_workers())
=cut
@@ -456,7 +455,7 @@ method rank()
Returns
-------
- size :int
+ $size : Int
The number of worker nodes
=cut
@@ -471,9 +470,9 @@ method num_workers()
Parameters
----------
- fname : str
+ $fname : Str
Path to output states file.
- dump_optimizer : bool, default False
+ :$dump_optimizer=0 : Bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
=cut
@@ -493,7 +492,7 @@ method save_optimizer_states(Str $fname, Bool :$dump_optimizer=0)
Parameters
----------
- fname : str
+ $fname : Str
Path to input states file.
=cut
@@ -517,7 +516,7 @@ method load_optimizer_states(Str $fname)
Parameters
----------
- updater : function
+ $updater : Updater
the updater function
Examples
@@ -540,20 +539,17 @@ method load_optimizer_states(Str $fname)
method _set_updater(Updater $updater_func)
{
- $self->_updater_func(
- sub {
- my ($index, $input_handle, $storage_handle) = @_;
- $updater_func->(
- $index,
- AI::MXNet::NDArray->_ndarray_cls($input_handle),
- AI::MXNet::NDArray->_ndarray_cls($storage_handle)
- );
- }
- );
check_call(
AI::MXNetCAPI::KVStoreSetUpdater(
$self->handle,
- $self->_updater_func
+ sub {
+ my ($index, $input_handle, $storage_handle) = @_;
+ $updater_func->(
+ $index,
+ AI::MXNet::NDArray->_ndarray_cls($input_handle),
+ AI::MXNet::NDArray->_ndarray_cls($storage_handle)
+ );
+ }
)
);
}
@@ -583,9 +579,9 @@ method _barrier()
Parameters
----------
- head : int
+ $head : Int
the head of the command
- body : str
+ $body : Str
the body of the command
=cut
@@ -606,7 +602,7 @@ method _send_command_to_servers(Int $head, Str $body)
Parameters
----------
- name : {'local'}
+ $name='local' : Str
The type of KVStore
- local works for multiple devices on a single machine (single process)
- dist works for multi-machines (multiple processes)
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/KVStoreServer.pm b/perl-package/AI-MXNet/lib/AI/MXNet/KVStoreServer.pm
index 4c274b92c71f..39e152a6d641 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/KVStoreServer.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/KVStoreServer.pm
@@ -27,7 +27,7 @@ use AI::MXNet::Function::Parameters;
=head1 NAME
- AI::MXNet::KVStoreServer - The key-value store server
+ AI::MXNet::KVStoreServer - The key-value store server.
=cut
=head2 new
@@ -36,7 +36,7 @@ use AI::MXNet::Function::Parameters;
Parameters
----------
- kvstore : KVStore
+ kvstore : AI::MXNet::KVStore
=cut
has 'kvstore' => (is => 'ro', isa => 'AI::MXNet::KVStore', required => 1);
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm b/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
index 27420f45167d..5575e37f75fe 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/LRScheduler.pm
@@ -58,7 +58,7 @@ has 'base_lr' => (is => 'rw', isa => 'Num', default => 0.01);
Parameters
----------
- num_update: int
+ $num_update: Int
the maximal number of updates applied to a weight.
=cut
@@ -76,9 +76,9 @@ package AI::MXNet::FactorScheduler;
Parameters
----------
- step: int
+ step: Int
schedule the learning rate update after n updates
- factor: float
+ factor: Num
the factor by which to reduce the learning rate.
=cut
use Mouse;
@@ -138,9 +138,9 @@ package AI::MXNet::MultiFactorScheduler;
Parameters
----------
- step: array ref of int
+ step: ArrayRef[Int]
schedule learning rate after n updates
- factor: float
+ factor: Num
the factor for reducing the learning rate
=cut
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/LinAlg.pm b/perl-package/AI-MXNet/lib/AI/MXNet/LinAlg.pm
index 9290e68d4561..be1262fb6a87 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/LinAlg.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/LinAlg.pm
@@ -21,6 +21,54 @@ use warnings;
use AI::MXNet::LinAlg::Symbol;
use AI::MXNet::LinAlg::NDArray;
+=head1 NAME
+
+ AI::MXNet::LinAlg - Linear Algebra routines for NDArray and Symbol.
+=cut
+
+=head1 DESCRIPTION
+
+ The Linear Algebra API, provides imperative/symbolic linear algebra tensor operations on CPU/GPU.
+
+ mx->linalg-><nd|sym>->gemm Performs general matrix multiplication and accumulation.
+ mx->linalg-><nd|sym>->gemm2 Performs general matrix multiplication.
+ mx->linalg-><nd|sym>->potrf Performs Cholesky factorization of a symmetric positive-definite matrix.
+ mx->linalg-><nd|sym>->potri Performs matrix inversion from a Cholesky factorization.
+ mx->linalg-><nd|sym>->trmm Performs multiplication with a lower triangular matrix.
+ mx->linalg-><nd|sym>->trsm Solves matrix equation involving a lower triangular matrix.
+ mx->linalg-><nd|sym>->sumlogdiag Computes the sum of the logarithms of the diagonal elements of a square matrix.
+ mx->linalg-><nd|sym>->syrk Multiplication of matrix with its transpose.
+ mx->linalg-><nd|sym>->gelqf LQ factorization for general matrix.
+ mx->linalg-><nd|sym>->syevd Eigendecomposition for symmetric matrix.
+ L
+ L
+
+ Examples:
+
+ ## NDArray
+ my $A = mx->nd->array([[1.0, 1.0], [1.0, 1.0]]);
+ my $B = mx->nd->array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]);
+ ok(almost_equal(
+ mx->nd->linalg->gemm2($A, $B, transpose_b=>1, alpha=>2.0)->aspdl,
+ pdl([[4.0, 4.0, 4.0], [4.0, 4.0, 4.0]])
+ ));
+
+ ## Symbol
+ my $sym_gemm2 = mx->sym->linalg->gemm2(
+ mx->sym->var('A'),
+ mx->sym->var('B'),
+ transpose_b => 1,
+ alpha => 2.0
+ );
+ my $A = mx->nd->array([[1.0, 1.0], [1.0, 1.0]]);
+ my $B = mx->nd->array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]);
+ ok(almost_equal(
+ $sym_gemm2->eval(args => { A => $A, B => $B })->[0]->aspdl,
+ pdl([[4.0, 4.0, 4.0], [4.0, 4.0, 4.0]])
+ ));
+
+=cut
+
sub sym { 'AI::MXNet::LinAlg::Symbol' }
sub symbol { 'AI::MXNet::LinAlg::Symbol' }
sub nd { 'AI::MXNet::LinAlg::NDArray' }
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Metric.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Metric.pm
index 3b9345d8baf9..b6e91aeaf729 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Metric.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Metric.pm
@@ -24,7 +24,11 @@ use JSON::PP;
=head1 NAME
- AI::MXNet::Metric - Online evaluation metric module.
+ AI::MXNet::Metric - Evaluation Metric API.
+=head1 DESCRIPTION
+
+ This module hosts all the evaluation metrics available to evaluate the performance of a learned model.
+ L
=cut
# Check to see if the two arrays are the same size.
@@ -61,11 +65,6 @@ func check_label_shapes(
) unless $pred_shape == $label_shape;
}
-=head1 DESCRIPTION
-
- Base class of all evaluation metrics.
-=cut
-
package AI::MXNet::EvalMetric;
use Mouse;
use overload '""' => sub {
@@ -232,11 +231,41 @@ method get()
# CLASSIFICATION METRICS
########################
+=head1 NAME
+
+ AI::MXNet::Accuracy - Computes accuracy classification score.
+=cut
+
+=head1 DESCRIPTION
+
+ The accuracy score is defined as
+
+ accuracy(y, y^) = (1/n) * sum(i=0..n-1) { y^(i)==y(i) }
+
+ Parameters:
+ axis (Int, default=1) – The axis that represents classes.
+ name (Str, default='accuracy') – Name of this metric instance for display.
+
+ pdl> use AI::MXNet qw(mx)
+ pdl> $predicts = [mx->nd->array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
+ pdl> $labels = [mx->nd->array([[0, 1, 1]])]
+ pdl> $acc = mx->metric->Accuracy()
+ pdl> $acc->update($labels, $predicts)
+ pdl> use Data::Dumper
+ pdl> print Dumper([$acc->get])
+ $VAR1 = [
+ 'accuracy',
+ '0.666666666666667'
+ ];
+
+=cut
+
package AI::MXNet::Accuracy;
use Mouse;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'accuracy');
+has 'axis' => (is => 'ro', isa => 'Int', default => 1);
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
{
@@ -245,22 +274,74 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
my ($label, $pred_label) = @$_;
if(join(',', @{$pred_label->shape}) ne join(',', @{$label->shape}))
{
- $pred_label = AI::MXNet::NDArray->argmax_channel($pred_label);
+ $pred_label = AI::MXNet::NDArray->argmax_channel($pred_label, { axis => $self->axis });
}
- AI::MXNet::Metric::check_label_shapes($label, $pred_label);
my $sum = ($pred_label->aspdl->flat == $label->aspdl->flat)->sum;
$self->sum_metric($self->sum_metric + $sum);
$self->num_inst($self->num_inst + $pred_label->size);
}
}
+=head1 NAME
+
+ AI::MXNet::TopKAccuracy - Computes top k predictions accuracy.
+=cut
+
+=head1 DESCRIPTION
+
+ TopKAccuracy differs from Accuracy in that it considers the prediction
+ to be True as long as the ground truth label is in the top K predicted labels.
+
+ If top_k = 1, then TopKAccuracy is identical to Accuracy.
+
+ Parameters:
+ top_k(Int, default 1) – Whether targets are in top k predictions.
+ name (Str, default 'top_k_accuracy') – Name of this metric instance for display.
+
+ use AI::MXNet qw(mx);
+ $top_k = 3;
+ $predicts = [mx->nd->array(
+ [[0.80342804, 0.5275223 , 0.11911147, 0.63968144, 0.09092526,
+ 0.33222568, 0.42738095, 0.55438581, 0.62812652, 0.69739294],
+ [0.78994969, 0.13189035, 0.34277045, 0.20155961, 0.70732423,
+ 0.03339926, 0.90925004, 0.40516066, 0.76043547, 0.47375838],
+ [0.28671892, 0.75129249, 0.09708994, 0.41235779, 0.28163896,
+ 0.39027778, 0.87110921, 0.08124512, 0.55793117, 0.54753428],
+ [0.33220307, 0.97326881, 0.2862761 , 0.5082575 , 0.14795074,
+ 0.19643398, 0.84082001, 0.0037532 , 0.78262101, 0.83347772],
+ [0.93790734, 0.97260166, 0.83282304, 0.06581761, 0.40379256,
+ 0.37479349, 0.50750135, 0.97787696, 0.81899021, 0.18754124],
+ [0.69804812, 0.68261077, 0.99909815, 0.48263116, 0.73059268,
+ 0.79518236, 0.26139168, 0.16107376, 0.69850315, 0.89950917],
+ [0.91515562, 0.31244902, 0.95412616, 0.7242641 , 0.02091039,
+ 0.72554552, 0.58165923, 0.9545687 , 0.74233195, 0.19750339],
+ [0.94900651, 0.85836332, 0.44904621, 0.82365038, 0.99726878,
+ 0.56413064, 0.5890016 , 0.42402702, 0.89548786, 0.44437266],
+ [0.57723744, 0.66019353, 0.30244304, 0.02295771, 0.83766937,
+ 0.31953292, 0.37552193, 0.18172362, 0.83135182, 0.18487429],
+ [0.96968683, 0.69644561, 0.60566253, 0.49600661, 0.70888438,
+ 0.26044186, 0.65267488, 0.62297362, 0.83609334, 0.3572364 ]]
+ )];
+ $labels = [mx->nd->array([2, 6, 9, 2, 3, 4, 7, 8, 9, 6])];
+ $acc = mx->metric->TopKAccuracy(top_k=>$top_k);
+ $acc->update($labels, $predicts);
+ use Data::Dumper;
+ print Dumper([$acc->get]);
+ $VAR1 = [
+ 'top_k_accuracy_3',
+ '0.3'
+ ];
+
+
+=cut
+
package AI::MXNet::TopKAccuracy;
use Mouse;
use List::Util qw/min/;
use AI::MXNet::Base;
extends 'AI::MXNet::EvalMetric';
has '+name' => (default => 'top_k_accuracy');
-has 'top_k' => (is => 'rw', isa => 'int', default => 1);
+has 'top_k' => (is => 'rw', isa => 'Int', default => 1);
method python_constructor_arguments() { ['top_k'] }
sub BUILD
@@ -302,71 +383,250 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
}
}
-# Calculate the F1 score of a binary classification problem.
-package AI::MXNet::F1;
-use Mouse;
-use AI::MXNet::Base;
-extends 'AI::MXNet::EvalMetric';
-has '+name' => (default => 'f1');
-
-method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
-{
- AI::MXNet::Metric::check_label_shapes($labels, $preds);
- for(zip($labels, $preds)) {
- my ($label, $pred_label) = @$_;
- AI::MXNet::Metric::check_label_shapes($label, $pred_label);
- $pred_label = $pred_label->aspdl->maximum_ind;
+package _BinaryClassificationMetrics {
+ use Mouse;
+ #Private container class for classification metric statistics. True/false positive and
+ # true/false negative counts are sufficient statistics for various classification metrics.
+ #This class provides the machinery to track those statistics across mini-batches of
+ #(label, prediction) pairs.
+ has [qw/true_positives
+ false_negatives
+ false_positives
+ true_negatives/] => (is => 'rw', isa => 'Int', default => 0);
+
+ method update_binary_stats(AI::MXNet::NDArray $label, AI::MXNet::NDArray $pred)
+ {
+ $pred = AI::MXNet::NDArray->argmax($pred, { axis => 1 })->aspdl;
$label = $label->astype('int32')->aspdl;
- confess("F1 currently only supports binary classification.")
- if $label->uniq->shape->at(0) > 2;
- my ($true_positives, $false_positives, $false_negatives) = (0,0,0);
- for(zip($pred_label->unpdl, $label->unpdl)) {
- my ($y_pred, $y_true) = @$_;
- if($y_pred == 1 and $y_true == 1)
- {
- $true_positives += 1;
- }
- elsif($y_pred == 1 and $y_true == 0)
- {
- $false_positives += 1;
- }
- elsif($y_pred == 0 and $y_true == 1)
- {
- $false_negatives += 1;
- }
+
+ AI::MXNet::Metric::check_label_shapes($label, $pred);
+ if($label->uniq->len > 2)
+ {
+ confess("Currently only support binary classification.");
}
- my $precision;
- my $recall;
- if($true_positives + $false_positives > 0)
+
+ my $pred_true = ($pred == 1);
+ my $pred_false = 1 - $pred_true;
+ my $label_true = ($label == 1);
+ my $label_false = 1 - $label_true;
+
+ $self->true_positives($self->true_positives + ($pred_true * $label_true)->sum);
+ $self->false_positives($self->false_positives + ($pred_true * $label_false)->sum);
+ $self->false_negatives($self->false_negatives + ($pred_false * $label_true)->sum);
+ $self->true_negatives($self->true_negatives + ($pred_false * $label_false)->sum);
+ }
+
+ method precision()
+ {
+ if($self->true_positives + $self->false_positives > 0)
{
- $precision = $true_positives / ($true_positives + $false_positives);
+ return $self->true_positives / ($self->true_positives + $self->false_positives);
}
else
{
- $precision = 0;
+ return 0;
}
- if($true_positives + $false_negatives > 0)
+ }
+
+ method recall()
+ {
+ if($self->true_positives + $self->false_negatives > 0)
{
- $recall = $true_positives / ($true_positives + $false_negatives);
+ return $self->true_positives / ($self->true_positives + $self->false_negatives);
}
else
{
- $recall = 0;
+ return 0;
}
- my $f1_score;
- if($precision + $recall > 0)
+ }
+
+ method fscore()
+ {
+ if($self->precision + $self->recall > 0)
{
- $f1_score = 2 * $precision * $recall / ($precision + $recall);
+ return 2 * $self->precision * $self->recall / ($self->precision + $self->recall);
}
else
{
- $f1_score = 0;
+ return 0;
+ }
+ }
+
+ method matthewscc()
+ {
+ if(not $self->total_examples)
+ {
+ return 0;
+ }
+ my @terms = (
+ $self->true_positives + $self->false_positives,
+ $self->true_positives + $self->false_negatives,
+ $self->true_negatives + $self->false_positives,
+ $self->true_negatives + $self->false_negatives
+ );
+ my $denom = 1;
+ for my $t (grep { $_ } @terms)
+ {
+ $denom *= $t;
+ }
+ return (($self->true_positives * $self->true_negatives) - ($self->false_positives * $self->false_negatives)) / sqrt($denom);
+ }
+
+ method total_examples()
+ {
+ return $self->false_negatives + $self->false_positives +
+ $self->true_negatives + $self->true_positives;
+ }
+
+ method reset_stats()
+ {
+ $self->false_positives(0);
+ $self->false_negatives(0);
+ $self->true_positives(0);
+ $self->true_negatives(0);
+ }
+};
+
+=head1 NAME
+
+ AI::MXNet::F1 - Calculate the F1 score of a binary classification problem.
+=cut
+
+=head1 DESCRIPTION
+
+ The F1 score is equivalent to harmonic mean of the precision and recall,
+ where the best value is 1.0 and the worst value is 0.0. The formula for F1 score is:
+
+ F1 = 2 * (precision * recall) / (precision + recall)
+ The formula for precision and recall is:
+
+ precision = true_positives / (true_positives + false_positives)
+ recall = true_positives / (true_positives + false_negatives)
+ Note:
+
+ This F1 score only supports binary classification.
+
+ Parameters:
+ name (Str, default 'f1') – Name of this metric instance for display.
+ average (Str, default 'macro') –
+ Strategy to be used for aggregating across mini-batches.
+ “macro”: average the F1 scores for each batch. “micro”: compute a single F1 score across all batches.
+
+
+ $predicts = [mx->nd->array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])];
+ $labels = [mx->nd->array([0., 1., 1.])];
+ $f1 = mx->metric->F1();
+ $f1->update($labels, $predicts);
+ print $f1->get;
+ f1 0.8
+
+=cut
+
+package AI::MXNet::F1;
+use Mouse;
+use AI::MXNet::Base;
+extends 'AI::MXNet::EvalMetric';
+has '+name' => (default => 'f1');
+has 'average' => (is => 'ro', isa => 'Str', default => 'macro');
+has 'metrics' => (is => 'rw', init_arg => undef, default => sub { _BinaryClassificationMetrics->new });
+has 'method' => (is => 'ro', init_arg => undef, default => 'fscore');
+method python_constructor_arguments() { [qw/name average/] }
+
+method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
+{
+ my $method = $self->method;
+ AI::MXNet::Metric::check_label_shapes($labels, $preds);
+ for(zip($labels, $preds)) {
+ my ($label, $pred) = @$_;
+ $self->metrics->update_binary_stats($label, $pred);
+ if($self->average eq "macro")
+ {
+ $self->sum_metric($self->sum_metric + $self->metrics->$method);
+ $self->num_inst($self->num_inst + 1);
+ $self->metrics->reset_stats();
+ }
+ else
+ {
+ $self->sum_metric($self->metrics->fscore * $self->metrics->total_examples);
+ $self->num_inst($self->metrics->total_examples);
}
- $self->sum_metric($self->sum_metric + $f1_score);
- $self->num_inst($self->num_inst + 1);
}
}
+method reset()
+{
+ $self->sum_metric(0);
+ $self->num_inst(0);
+ $self->metrics->reset_stats();
+}
+
+=head1 NAME
+
+ AI::MXNet::MCC - Computes the Matthews Correlation Coefficient of a binary classification problem.
+=cut
+
+=head1 DESCRIPTION
+
+ While slower to compute than F1, the MCC can give insight that F1 or Accuracy cannot.
+ For instance, if the network always predicts the same result
+ then the MCC will immediately show this. The MCC is also symmetric with respect
+ to positive and negative categorization; however, there needs to be both
+ positive and negative examples in the labels or it will always return 0.
+ MCC of 0 is uncorrelated, 1 is completely correlated, and -1 is negatively correlated.
+
+ MCC = (TP * TN - FP * FN)/sqrt( (TP + FP)*( TP + FN )*( TN + FP )*( TN + FN ) )
+
+ where 0 terms in the denominator are replaced by 1.
+
+ This version of MCC only supports binary classification.
+
+ Parameters
+ ----------
+ name : str, 'mcc'
+ Name of this metric instance for display.
+ average : str, default 'macro'
+ Strategy to be used for aggregating across mini-batches.
+ "macro": average the MCC for each batch.
+ "micro": compute a single MCC across all batches.
+
+ Examples
+ --------
+ In this example the network almost always predicts positive
+ >>> $false_positives = 1000
+ >>> $false_negatives = 1
+ >>> $true_positives = 10000
+ >>> $true_negatives = 1
+ >>> $predicts = [mx->nd->array(
+ [
+ ([.3, .7])x$false_positives,
+ ([.7, .3])x$true_negatives,
+ ([.7, .3])x$false_negatives,
+ ([.3, .7])x$true_positives
+ ]
+ )];
+ >>> $labels = [mx->nd->array(
+ [
+ (0)x($false_positives + $true_negatives),
+ (1)x($false_negatives + $true_positives)
+ ]
+ )];
+ >>> $f1 = mx->metric->F1();
+ >>> $f1->update($labels, $predicts);
+ >>> $mcc = mx->metric->MCC()
+ >>> $mcc->update($labels, $predicts)
+ >>> print $f1->get();
+ f1 0.95233560306652054
+ >>> print $mcc->get();
+ mcc 0.01917751877733392
+
+=cut
+
+package AI::MXNet::MCC;
+use Mouse;
+extends 'AI::MXNet::F1';
+has '+name' => (default => 'mcc');
+has '+method' => (default => 'matthewscc');
+
package AI::MXNet::Perplexity;
use Mouse;
use AI::MXNet::Base;
@@ -385,12 +645,13 @@ around BUILDARGS => sub {
=head1 NAME
- AI::MXNet::Perplexity
+ AI::MXNet::Perplexity - Calculate perplexity.
=cut
=head1 DESCRIPTION
- Calculate perplexity.
+ Perplexity is a measurement of how well a probability distribution or model predicts a sample.
+ A low perplexity indicates the model is good at predicting the sample.
Parameters
----------
@@ -402,6 +663,14 @@ around BUILDARGS => sub {
The axis from prediction that was used to
compute softmax. By default uses the last
axis.
+
+ $predicts = [mx->nd->array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])];
+ $labels = [mx->nd->array([0, 1, 1])];
+ $perp = mx->metric->Perplexity(ignore_label=>undef);
+ $perp->update($labels, $predicts);
+ print $perp->get()
+ Perplexity 1.77109762851559
+
=cut
method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray] $preds)
@@ -440,7 +709,21 @@ method get()
# REGRESSION METRICS
####################
-# Calculate Mean Absolute Error loss
+=head1 NAME
+
+ AI::MXNet::MAE - Calculate Mean Absolute Error loss
+=head1 DESCRIPTION
+
+ >>> $predicts = [mx->nd->array([3, -0.5, 2, 7])->reshape([4,1])]
+ >>> $labels = [mx->nd->array([2.5, 0.0, 2, 8])->reshape([4,1])]
+ >>> $mean_absolute_error = mx->metric->MAE()
+ >>> $mean_absolute_error->update($labels, $predicts)
+ >>> print $mean_absolute_error->get()
+ ('mae', 0.5)
+
+=cut
+
+
package AI::MXNet::MAE;
use Mouse;
use AI::MXNet::Base;
@@ -463,7 +746,20 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
}
}
-# Calculate Mean Squared Error loss
+=head1 NAME
+
+ AI::MXNet::MSE - Calculate Mean Squared Error loss
+=head1 DESCRIPTION
+
+ >>> $predicts = [mx->nd->array([3, -0.5, 2, 7])->reshape([4,1])]
+ >>> $labels = [mx->nd->array([2.5, 0.0, 2, 8])->reshape([4,1])]
+ >>> $mean_squared_error = mx->metric->MSE()
+ >>> $mean_squared_error->update($labels, $predicts)
+ >>> print $mean_squared_error->get()
+ ('mse', 0.375)
+
+=cut
+
package AI::MXNet::MSE;
use Mouse;
use AI::MXNet::Base;
@@ -486,7 +782,20 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
}
}
-# Calculate Root Mean Squred Error loss
+=head1 NAME
+
+ AI::MXNet::RMSE - Calculate Root Mean Squred Error loss
+=head1 DESCRIPTION
+
+ >>> $predicts = [mx->nd->array([3, -0.5, 2, 7])->reshape([4,1])]
+ >>> $labels = [mx->nd->array([2.5, 0.0, 2, 8])->reshape([4,1])]
+ >>> $root_mean_squared_error = mx->metric->RMSE()
+ >>> $root_mean_squared_error->update($labels, $predicts)
+ >>> print $root_mean_squared_error->get()
+ 'rmse', 0.612372457981
+
+=cut
+
package AI::MXNet::RMSE;
use Mouse;
use AI::MXNet::Base;
@@ -509,6 +818,21 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
}
}
+
+=head1 NAME
+
+ AI::MXNet::CrossEntropy - Calculate Cross Entropy loss
+=head1 DESCRIPTION
+
+ >>> $predicts = [mx->nd->array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
+ >>> $labels = [mx->nd->array([0, 1, 1])]
+ >>> $ce = mx->metric->CrossEntropy()
+ >>> $ce->update($labels, $predicts)
+ >>> print $ce->get()
+ ('cross-entropy', 0.57159948348999023)
+
+=cut
+
# Calculate Cross Entropy loss
package AI::MXNet::CrossEntropy;
use Mouse;
@@ -537,6 +861,26 @@ method update(ArrayRef[AI::MXNet::NDArray] $labels, ArrayRef[AI::MXNet::NDArray]
}
}
+=head1 NAME
+
+ AI::MXNet::NegativeLogLikelihood - Computes the negative log-likelihood loss.
+=head1 DESCRIPTION
+
+ >>> $predicts = [mx->nd->array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
+ >>> $labels = [mx->nd->array([0, 1, 1])]
+ >>> $nll_loss = mx->metric->NegativeLogLikelihood
+ >>> $nll_loss->update($labels, $predicts)
+ >>> print $nll_loss->get()
+ ('cross-entropy', 0.57159948348999023)
+
+=cut
+
+package AI::MXNet::NegativeLogLikelihood;
+use Mouse;
+use AI::MXNet::Base;
+extends 'AI::MXNet::CrossEntropy';
+has '+name' => (default => 'nll_loss');
+
package AI::MXNet::PearsonCorrelation;
use Mouse;
use AI::MXNet::Base;
@@ -545,7 +889,7 @@ has '+name' => (default => 'pearson-correlation');
=head1 NAME
- AI::MXNet::PearsonCorrelation
+ AI::MXNet::PearsonCorrelation - Computes Pearson correlation.
=cut
=head1 DESCRIPTION
@@ -594,7 +938,7 @@ has '+name' => (default => 'loss');
=head1 NAME
- AI::MXNet::Loss
+ AI::MXNet::Loss - Dummy metric for directly printing loss.
=cut
=head1 DESCRIPTION
@@ -621,7 +965,7 @@ use Mouse;
=head1 NAME
- AI::MXNet::Confidence
+ AI::MXNet::Confidence - Accuracy by confidence buckets.
=cut
=head1 DESCRIPTION
@@ -717,7 +1061,7 @@ sub get
=head1 NAME
- AI::MXNet::CustomMetric
+ AI::MXNet::CustomMetric - Custom evaluation metric that takes a sub ref.
=cut
=head1 DESCRIPTION
@@ -779,7 +1123,9 @@ my %metrics = qw/
accuracy AI::MXNet::Accuracy
ce AI::MXNet::CrossEntropy
crossentropy AI::MXNet::CrossEntropy
+ nll_loss AI::MXNet::NegativeLogLikelihood
f1 AI::MXNet::F1
+ mcc AI::MXNet::MCC
mae AI::MXNet::MAE
mse AI::MXNet::MSE
rmse AI::MXNet::RMSE
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Module.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Module.pm
index 16c9a92d73ae..38c2ae645969 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Module.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Module.pm
@@ -268,28 +268,28 @@ method BucketingModule(@args) { return AI::MXNet::Module::Bucketing->new(@args)
Parameters
----------
- prefix : str
+ $prefix : Str
path prefix of saved model files. You should have
"prefix-symbol.json", "prefix-xxxx.params", and
optionally "prefix-xxxx.states", where xxxx is the
epoch number.
- epoch : int
+ $epoch : Int
epoch to load.
- load_optimizer_states : bool
+ $load_optimizer_states=0 : Bool
whether to load optimizer states. Checkpoint needs
to have been made with save_optimizer_states=True.
- data_names : array ref of str
+ :$data_names : array ref of str
Default is ['data'] for a typical model used in image classification.
- label_names : array ref of str
+ :$label_names : array ref of str
Default is ['softmax_label'] for a typical model used in image
classification.
- logger : Logger
+ :$logger : Logger
Default is AI::MXNet::Logging.
- context : Context or list of Context
+ :$context : Context or list of Context
Default is cpu(0).
- work_load_list : array ref of number
+ :$work_load_list : array ref of number
Default is undef, indicating an uniform workload.
- fixed_param_names: array ref of str
+ :$fixed_param_names: array ref of str
Default is undef, indicating no network parameters are fixed.
=cut
@@ -319,11 +319,11 @@ method load(
Parameters
----------
- prefix : str
+ $prefix : Str
The file prefix to checkpoint to
- epoch : int
+ $epoch : Int
The current epoch number
- save_optimizer_states : bool
+ $save_optimizer_states=0 : Bool
Whether to save optimizer states for later training
=cut
@@ -348,16 +348,16 @@ method save_checkpoint(Str $prefix, Int $epoch, Bool $save_optimizer_states=0)
Parameters
----------
- prefix : str
+ $prefix : Str
Prefix of model name.
- epoch : int
+ $epoch : Int
The epoch number of the model.
- symbol : AI::MXNet::Symbol
+ $symbol : AI::MXNet::Symbol
The input symbol
- arg_params : hash ref of str to AI::MXNet::NDArray
- Model parameter, hash ref of name to AI::MXNet::NDArray of net's weights.
- aux_params : hash ref of str to NDArray
- Model parameter, hash ref of name to AI::MXNet::NDArray of net's auxiliary states.
+ $arg_params : HashRef[AI::MXNet::NDArray]
+ Model's parameters, hash ref of name to AI::MXNet::NDArray of net's weights.
+ $aux_params : HashRef[AI::MXNet::NDArray]
+ Model's parameters, hash ref of name to AI::MXNet::NDArray of net's auxiliary states.
Notes
-----
- prefix-symbol.json will be saved for symbol.
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Monitor.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Monitor.pm
index 0e46c31348a3..76fdfd24e7e8 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Monitor.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Monitor.pm
@@ -30,13 +30,13 @@ use AI::MXNet::Base;
Parameters
----------
- interval : int
+ interval : Int
Number of batches between printing.
- stat_func : function
+ stat_func : CodeRef
a function that computes statistics of tensors.
Takes a NDArray and returns a NDArray. defaults to mean
absolute value |x|/size(x).
- pattern : str
+ pattern : Str
A regular expression specifying which tensors to monitor.
Only tensors with names that match name_pattern will be included.
For example, '.*weight|.*output' will print all weights and outputs;
@@ -94,7 +94,7 @@ has 'stat_helper' => (
Parameters
----------
- exe : AI::MXNet::Executor
+ $exe : AI::MXNet::Executor
the Executor (returned by $symbol->bind) to install to.
=cut
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/NDArray.pm b/perl-package/AI-MXNet/lib/AI/MXNet/NDArray.pm
index 3177a3705941..873953192933 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/NDArray.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/NDArray.pm
@@ -22,6 +22,41 @@ package AI::MXNet::NDArray;
AI::MXNet::NDArray - Multidimensional tensor object of MXNet.
=cut
+=head1 DESCRIPTION
+
+ AI::MXNet::NDArray - Imperative tensor operations on CPU/GPU
+ In AI::MXNet, NDArray is the core data structure for all mathematical computations.
+ An NDArray represents a multidimensional, fixed-size homogeneous array.
+ If you’re familiar with the PDL, you might notice some similarities.
+ However, NDArray is row-major, unlike the PDL that is column-major.
+ Like the PDL, MXNet’s NDArray enables imperative computation.
+
+ Some NDArray advantages compared to PDL:
+ MXNet’s NDArray supports fast execution on a wide range of hardware configurations, including CPU, GPU, and multi-GPU machines.
+ MXNet also scales to distributed systems in the cloud.
+ MXNet’s NDArray executes code lazily, allowing it to automatically parallelize multiple operations across the available hardware.
+
+ An NDArray is a multidimensional array of numbers with the same type.
+ We could represent the coordinates of a point in 3D space, e.g. [2, 1, 6] as a 1D array with shape (3).
+ Similarly, we could represent a 2D array.
+ Below, we present an array with length 2 along the first axis and length 3 along the second axis.
+
+ [[0, 1, 2]
+ [3, 4, 5]]
+ Note that here the use of “dimension” is overloaded. When we say a 2D array, we mean an array with 2 axes, not an array with two components.
+
+ Each NDArray supports some important attributes that you’ll often want to query:
+
+ $ndarray->shape: The dimensions of the array.
+ It is an array ref of integers indicating the length of the array along each axis.
+ For a matrix with $n rows and $m columns, its shape will be [$n, $m].
+ $ndarray->dtype: A string describing the type of its elements.
+ Dtype (defined in AI::MXNet::Types) is one of (float32 float64 float16 uint8 int8 int32 int64)
+ $ndarray->size: The total number of components in the array - equal to the product of the components of its shape.
+ $ndarray->context: The device on which this array is stored, represented by an object of AI::MXNet::Context class, e.g. cpu() or gpu(1).
+
+=cut
+
use strict;
use warnings;
use AI::MXNet::Base;
@@ -693,35 +728,6 @@ method onehot_encode(AI::MXNet::NDArray $indices, AI::MXNet::NDArray $out)
return __PACKAGE__->_onehot_encode($indices, $out, { out => $out });
}
-=head2 _ufunc_helper(lhs, rhs, fn_array, lfn_scalar, rfn_scalar):
-
- Helper function for element-wise operation
- The function will perform numpy-like broadcasting if needed and call different functions
-
- Parameters
- ----------
- lhs : NDArray or numeric value
- left hand side operand
-
- rhs : NDArray or numeric value
- right hand side operand
-
- fn_array : function
- function to be called if both lhs and rhs are of NDArray type
-
- lfn_scalar : function
- function to be called if lhs is NDArray while rhs is numeric value
-
- rfn_scalar : function
- function to be called if lhs is numeric value while rhs is NDArray;
- if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar
-
- Returns
- -------
- out: NDArray
- result array
-=cut
-
sub _ufunc_helper
{
my ($lhs, $rhs, $fn_array, $lfn_scalar, $rfn_scalar, $reverse) = @_;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/NDArray/Sparse.pm b/perl-package/AI-MXNet/lib/AI/MXNet/NDArray/Sparse.pm
index bb5171c238b6..e0257fd0238b 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/NDArray/Sparse.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/NDArray/Sparse.pm
@@ -356,9 +356,6 @@ extends 'AI::MXNet::NDArray::Sparse';
csr_matrix: Several ways to construct a CSRNDArray
=cut
-# def __reduce__(self):
-# return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
-
use overload '+=' => sub { ($_[0] + $_[1])->copyto($_[0]) },
'-=' => sub { ($_[0] - $_[1])->copyto($_[0]) },
'*=' => sub { ($_[0] * $_[1])->copyto($_[0]) },
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Optimizer.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Optimizer.pm
index fd1316478db1..ad0e45503220 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Optimizer.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Optimizer.pm
@@ -63,14 +63,14 @@ method register()
Parameters
----------
- name: str
+ $name: Str
Name of required optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
- rescale_grad : float
+ :$rescale_grad : Num
Rescaling factor on gradient. Normally should be 1/batch_size.
- kwargs: dict
+ %kwargs: Hash
Parameters for optimizer
Returns
@@ -290,25 +290,25 @@ method _get_wd(Index $index)
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
learning_rate of SGD
- momentum : float, optional
+ momentum : Num, optional
momentum value
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
- param_idx2name : hash of string/int to float, optional
+ param_idx2name : hash ref of Str/Int to Num, optional
special treat weight decay in parameter ends with bias, gamma, and beta
- multi_precision: bool, optional
+ multi_precision: Bool, optional
Flag to control the internal precision of the optimizer.
False results in using the same precision as the weights (default),
True makes internal 32-bit copy of the weights and applies gradients
@@ -438,18 +438,15 @@ __PACKAGE__->register;
See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf
- For details of the update algorithm see
- :class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.
-
This optimizer accepts the following parameters in addition to those accepted
- by :class:`.Optimizer`.
+ by AI::MXNet::Optimizer
Parameters
----------
- momentum : float, optional
+ momentum : Num, optional
The momentum value.
- wd_lh : float, optional
- The amount of decoupled weight decay regularization, see details in the original paper at:\
+ wd_lh : Num, optional
+ The amount of decoupled weight decay regularization, see details in the original paper at:
https://arxiv.org/abs/1711.05101
=cut
@@ -536,11 +533,11 @@ __PACKAGE__->register;
Parameters
----------
- beta1 : float, optional
+ beta1 : Num, optional
0 < beta1 < 1. Generally close to 0.5.
- beta2 : float, optional
+ beta2 : Num, optional
0 < beta2 < 1. Generally close to 1.
- epsilon : float, optional
+ epsilon : Num, optional
Small value to avoid division by 0.
=cut
@@ -604,12 +601,12 @@ __PACKAGE__->register;
Parameters
----------
- momentum : float, optional
+ momentum : Num, optional
The momentum value.
- multi_precision: bool, optional
+ multi_precision: Bool, optional
Flag to control the internal precision of the optimizer.
- ``False`` results in using the same precision as the weights (default),
- ``True`` makes internal 32-bit copy of the weights and applies gradients
+ 0 results in using the same precision as the weights (default),
+ 1 makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.`<
Turning this on can improve convergence and accuracy when training with float16.
warmup_strategy: string ('linear', 'power2', 'sqrt'. , 'lars' default : 'linear')
@@ -896,26 +893,26 @@ extends 'AI::MXNet::Optimizer';
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
learning_rate of SGD
- momentum : float, optional
+ momentum : Num, optional
momentum value
- lamda : float, optional
+ lamda : Num, optional
scale DC value
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
- param_idx2name : hash ref of string/int to float, optional
- special treat weight decay in parameter ends with bias, gamma, and beta
+ param_idx2name : hash ref of Str/Int to Num, optional
+ special treatment of weight decay for parameters that end with bias, gamma, and beta
=cut
has 'momentum' => (is => 'ro', isa => 'Num', default => 0);
has 'lamda' => (is => 'ro', isa => 'Num', default => 0.04);
@@ -1091,16 +1088,16 @@ __PACKAGE__->register;
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
learning_rate of SGD
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
@@ -1158,29 +1155,26 @@ __PACKAGE__->register;
*Adam: A Method for Stochastic Optimization*,
http://arxiv.org/abs/1412.6980
- the code in this class was adapted from
- https://github.com/mila-udem/blocks/blob/master/blocks/algorithms/__init__.py#L765
-
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
Step size.
Default value is set to 0.001.
- beta1 : float, optional
+ beta1 : Num, optional
Exponential decay rate for the first moment estimates.
Default value is set to 0.9.
- beta2 : float, optional
+ beta2 : Num, optional
Exponential decay rate for the second moment estimates.
Default value is set to 0.999.
- epsilon : float, optional
+ epsilon : Num, optional
Default value is set to 1e-8.
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::Adam;
@@ -1271,21 +1265,21 @@ __PACKAGE__->register;
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
Step size.
Default value is set to 0.05.
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- eps: float, optional
+ eps: Num, optional
A small float number to make the updating processing stable
Default value is set to 1e-7.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::AdaGrad;
@@ -1361,27 +1355,27 @@ __PACKAGE__->register;
Parameters
----------
- learning_rate : float, optional
+ learning_rate : Num, optional
Step size.
Default value is set to 0.001.
- gamma1: float, optional
+ gamma1: Num, optional
decay factor of moving average for gradient^2.
Default value is set to 0.9.
- gamma2: float, optional
+ gamma2: Num, optional
"momentum" factor.
Default value if set to 0.9.
Only used if centered=True
- epsilon : float, optional
+ epsilon : Num, optional
Default value is set to 1e-8.
- centered : bool, optional
+ centered : Bool, optional
Use Graves or Tielemans & Hintons version of RMSProp
- wd : float, optional
+ wd : Num, optional
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
- clip_weights : float, optional
+ clip_weights : Num, optional
clip weights in range [-clip_weights, clip_weights]
=cut
@@ -1508,15 +1502,15 @@ __PACKAGE__->register;
Parameters
----------
- rho: float
+ rho: Num
Decay rate for both squared gradients and delta x
- epsilon : float
+ epsilon : Num
The constant as described in the thesis
- wd : float
+ wd : Num
L2 regularization coefficient add to all the weights
- rescale_grad : float, optional
+ rescale_grad : Num, optional
rescaling factor of gradient. Normally should be 1/batch_size.
- clip_gradient : float, optional
+ clip_gradient : Num, optional
clip gradient in range [-clip_gradient, clip_gradient]
=cut
package AI::MXNet::AdaDelta;
@@ -1614,18 +1608,14 @@ package AI::MXNet::Ftrl;
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
- eta :
- .. math::
- \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}
-
- The optimizer updates the weight by::
+ The optimizer updates the weight by:
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
n += rescaled_grad**2
w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)
- If the storage types of weight, state and grad are all ``row_sparse``, \
+ If the storage types of weight, state and grad are all row_sparse,
**sparse updates** are applied by::
for row in grad.indices:
@@ -1641,18 +1631,16 @@ package AI::MXNet::Ftrl;
provides slightly different semantics than the original update, and
may lead to different empirical results.
- For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.
-
This optimizer accepts the following parameters in addition to those accepted
- by :class:`.Optimizer`.
+ by AI::MXNet::Optimizer
Parameters
----------
- lamda1 : float, optional
+ lamda1 : Num, optional
L1 regularization coefficient.
- learning_rate : float, optional
+ learning_rate : Num, optional
The initial learning rate.
- beta : float, optional
+ beta : Num, optional
Per-coordinate learning rate correlation parameter.
=cut
@@ -1720,9 +1708,9 @@ package AI::MXNet::Adamax;
Parameters
----------
- beta1 : float, optional
+ beta1 : Num, optional
Exponential decay rate for the first moment estimates.
- beta2 : float, optional
+ beta2 : Num, optional
Exponential decay rate for the second moment estimates.
=cut
@@ -1798,17 +1786,17 @@ package AI::MXNet::Nadam;
at http://cs229.stanford.edu/proj2015/054_report.pdf.
This optimizer accepts the following parameters in addition to those accepted
- AI::MXNet::Optimizer.
+ by AI::MXNet::Optimizer.
Parameters
----------
- beta1 : float, optional
+ beta1 : Num, optional
Exponential decay rate for the first moment estimates.
- beta2 : float, optional
+ beta2 : Num, optional
Exponential decay rate for the second moment estimates.
- epsilon : float, optional
+ epsilon : Num, optional
Small value to avoid division by 0.
- schedule_decay : float, optional
+ schedule_decay : Num, optional
Exponential decay rate for the momentum schedule
=cut
@@ -1879,7 +1867,11 @@ method update(
__PACKAGE__->register;
-# updater for kvstore
+=head1 NAME
+
+ AI::MXNet::Updater - Updater for kvstore
+=cut
+
package AI::MXNet::Updater;
use Mouse;
use Storable qw(thaw freeze);
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/RNN/Cell.pm b/perl-package/AI-MXNet/lib/AI/MXNet/RNN/Cell.pm
index f2d8b5369e99..9dd88cbb029e 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/RNN/Cell.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/RNN/Cell.pm
@@ -21,7 +21,7 @@ use AI::MXNet::Function::Parameters;
=head1 NAME
- AI::MXNet::RNN::Params
+ AI::MXNet::RNN::Params - A container for holding variables.
=cut
=head1 DESCRIPTION
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol.pm
index bccf483d4367..57bfdf1d977c 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol.pm
@@ -528,7 +528,7 @@ method list_inputs()
=cut
-method infer_type(Str|Undef @args)
+method infer_type(Maybe[Str] @args)
{
my ($positional_arguments, $kwargs, $kwargs_order) = _parse_arguments("Dtype", @args);
my $sdata = [];
@@ -1370,6 +1370,7 @@ method load(Str $fname)
}
=head2 load_json
+
Load symbol from json string.
Parameters
@@ -1469,12 +1470,12 @@ sub _parse_arguments
}
else
{
- confess("Argument need to be of type $type");
+ confess("Argument needs to be of type $type");
}
}
else
{
- confess("Argument need to be one type $type");
+ confess("Argument needs to be one type $type");
}
}
return (\@positional_arguments, \%kwargs, \@kwargs_order);
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/Base.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/Base.pm
index 2cb20b7c5610..d668decc6918 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/Base.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/Base.pm
@@ -32,7 +32,7 @@ use AI::MXNet::Function::Parameters;
=head1 DESCRIPTION
- A convenience class that loads all C++m symbol related functions at runtime.
+ A convenience class that loads all C++ symbol related functions at runtime.
=cut
my %function_meta;
diff --git a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/NameManager.pm b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/NameManager.pm
index 95ea8a6f49ea..0126655186fd 100644
--- a/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/NameManager.pm
+++ b/perl-package/AI-MXNet/lib/AI/MXNet/Symbol/NameManager.pm
@@ -21,7 +21,11 @@ use warnings;
use Mouse;
use AI::MXNet::Function::Parameters;
-=head1
+=head1 NAME
+
+ AI::MXNet::Symbol::NameManager - Automated symbol naming.
+
+=head1 DESCRIPTION
NameManager that does an automatic naming.
diff --git a/perl-package/AI-MXNet/t/test_gluon.t b/perl-package/AI-MXNet/t/test_gluon.t
index 32127229b95f..545cb7b3f882 100644
--- a/perl-package/AI-MXNet/t/test_gluon.t
+++ b/perl-package/AI-MXNet/t/test_gluon.t
@@ -1164,7 +1164,7 @@ sub test_zero_grad
$net->($data)->backward;
});
$net->collect_params->zero_grad;
- my $grad = $net->collect_params->{test_zero_grad_weight}->grad;
+ my $grad = $net->collect_params->params->get('test_zero_grad_weight')->grad;
ok(almost_equal($grad->aspdl, $grad->aspdl * 0));
}
diff --git a/perl-package/AI-MXNetCAPI/Changes b/perl-package/AI-MXNetCAPI/Changes
index 8dad8b455364..938b8e268f1d 100644
--- a/perl-package/AI-MXNetCAPI/Changes
+++ b/perl-package/AI-MXNetCAPI/Changes
@@ -1,5 +1,8 @@
Revision history for Perl extension AI::MXNetCAPI
+1.32 Sun Aug 5 14:25:31 PDT 2018
+ - Bugfixes.
+
1.3 Tue Jun 26 20:57:40 PDT 2018
- Major update, Gluon interface updated to parity with Python's API
diff --git a/perl-package/AI-MXNetCAPI/META.json b/perl-package/AI-MXNetCAPI/META.json
index 35271e3edaab..854023559c62 100644
--- a/perl-package/AI-MXNetCAPI/META.json
+++ b/perl-package/AI-MXNetCAPI/META.json
@@ -37,5 +37,5 @@
}
},
"release_status" : "stable",
- "version" : "1.3"
+ "version" : "1.32"
}
diff --git a/perl-package/AI-MXNetCAPI/META.yml b/perl-package/AI-MXNetCAPI/META.yml
index 48760da13629..1db34c501d8c 100644
--- a/perl-package/AI-MXNetCAPI/META.yml
+++ b/perl-package/AI-MXNetCAPI/META.yml
@@ -19,4 +19,4 @@ no_index:
- inc
requires:
Test::More: '0'
-version: '1.3'
+version: '1.32'
diff --git a/perl-package/AI-MXNetCAPI/README b/perl-package/AI-MXNetCAPI/README
index dca8b4a1ee06..f5881ff2db07 100644
--- a/perl-package/AI-MXNetCAPI/README
+++ b/perl-package/AI-MXNetCAPI/README
@@ -1,4 +1,4 @@
-AI-MXNetCAPI version 1.3
+AI-MXNetCAPI version 1.32
=====================
Swig interface to MXNet c api.
diff --git a/perl-package/AI-MXNetCAPI/lib/AI/MXNetCAPI.pm b/perl-package/AI-MXNetCAPI/lib/AI/MXNetCAPI.pm
index b578507277d2..e371219b0ae6 100644
--- a/perl-package/AI-MXNetCAPI/lib/AI/MXNetCAPI.pm
+++ b/perl-package/AI-MXNetCAPI/lib/AI/MXNetCAPI.pm
@@ -18,7 +18,7 @@
package AI::MXNetCAPI;
use base qw(DynaLoader);
bootstrap AI::MXNetCAPI;
-our $VERSION = '1.3';
+our $VERSION = '1.32';
1;
__END__
diff --git a/perl-package/AI-MXNetCAPI/mxnet_typemaps.i b/perl-package/AI-MXNetCAPI/mxnet_typemaps.i
index 4d9177a000a0..68e11ca74e1a 100644
--- a/perl-package/AI-MXNetCAPI/mxnet_typemaps.i
+++ b/perl-package/AI-MXNetCAPI/mxnet_typemaps.i
@@ -1215,5 +1215,5 @@
%typemap(in) (void* callback_handle)
{
- $1 = (void*)$input;
+ $1 = (void*)newSVsv($input);
}