.. automodule:: torch.nn
.. currentmodule:: torch.nn

.. autoclass:: Parameter
    :members:

.. autoclass:: Module
    :members:

.. autoclass:: Sequential
    :members:

.. autoclass:: ModuleList
    :members:

.. autoclass:: ModuleDict
    :members:

.. autoclass:: ParameterList
    :members:

.. autoclass:: ParameterDict
    :members:

.. autoclass:: Conv1d
    :members:

.. autoclass:: Conv2d
    :members:

.. autoclass:: Conv3d
    :members:

.. autoclass:: ConvTranspose1d
    :members:

.. autoclass:: ConvTranspose2d
    :members:

.. autoclass:: ConvTranspose3d
    :members:

.. autoclass:: Unfold
    :members:

.. autoclass:: Fold
    :members:

.. autoclass:: MaxPool1d
    :members:

.. autoclass:: MaxPool2d
    :members:

.. autoclass:: MaxPool3d
    :members:

.. autoclass:: MaxUnpool1d
    :members:

.. autoclass:: MaxUnpool2d
    :members:

.. autoclass:: MaxUnpool3d
    :members:

.. autoclass:: AvgPool1d
    :members:

.. autoclass:: AvgPool2d
    :members:

.. autoclass:: AvgPool3d
    :members:

.. autoclass:: FractionalMaxPool2d
    :members:

.. autoclass:: LPPool1d
    :members:

.. autoclass:: LPPool2d
    :members:

.. autoclass:: AdaptiveMaxPool1d
    :members:

.. autoclass:: AdaptiveMaxPool2d
    :members:

.. autoclass:: AdaptiveMaxPool3d
    :members:

.. autoclass:: AdaptiveAvgPool1d
    :members:

.. autoclass:: AdaptiveAvgPool2d
    :members:

.. autoclass:: AdaptiveAvgPool3d
    :members:

.. autoclass:: ReflectionPad1d
    :members:

.. autoclass:: ReflectionPad2d
    :members:

.. autoclass:: ReplicationPad1d
    :members:

.. autoclass:: ReplicationPad2d
    :members:

.. autoclass:: ReplicationPad3d
    :members:

.. autoclass:: ZeroPad2d
    :members:

.. autoclass:: ConstantPad1d
    :members:

.. autoclass:: ConstantPad2d
    :members:

.. autoclass:: ConstantPad3d
    :members:

.. autoclass:: ELU
    :members:

.. autoclass:: Hardshrink
    :members:

.. autoclass:: Hardtanh
    :members:

.. autoclass:: LeakyReLU
    :members:

.. autoclass:: LogSigmoid
    :members:

.. autoclass:: MultiheadAttention
    :members:

.. autoclass:: PReLU
    :members:

.. autoclass:: ReLU
    :members:

.. autoclass:: ReLU6
    :members:

.. autoclass:: RReLU
    :members:

.. autoclass:: SELU
    :members:

.. autoclass:: CELU
    :members:

.. autoclass:: Sigmoid
    :members:

.. autoclass:: Softplus
    :members:

.. autoclass:: Softshrink
    :members:

.. autoclass:: Softsign
    :members:

.. autoclass:: Tanh
    :members:

.. autoclass:: Tanhshrink
    :members:

.. autoclass:: Threshold
    :members:

.. autoclass:: Softmin
    :members:

.. autoclass:: Softmax
    :members:

.. autoclass:: Softmax2d
    :members:

.. autoclass:: LogSoftmax
    :members:

.. autoclass:: AdaptiveLogSoftmaxWithLoss
    :members:

.. autoclass:: BatchNorm1d
    :members:

.. autoclass:: BatchNorm2d
    :members:

.. autoclass:: BatchNorm3d
    :members:

.. autoclass:: GroupNorm
    :members:

.. autoclass:: SyncBatchNorm
    :members:

.. autoclass:: InstanceNorm1d
    :members:

.. autoclass:: InstanceNorm2d
    :members:

.. autoclass:: InstanceNorm3d
    :members:

.. autoclass:: LayerNorm
    :members:

.. autoclass:: LocalResponseNorm
    :members:

.. autoclass:: RNN
    :members:

.. autoclass:: LSTM
    :members:

.. autoclass:: GRU
    :members:

.. autoclass:: RNNCell
    :members:

.. autoclass:: LSTMCell
    :members:

.. autoclass:: GRUCell
    :members:

.. autoclass:: Transformer
    :members:

.. autoclass:: TransformerEncoder
    :members:

.. autoclass:: TransformerDecoder
    :members:

.. autoclass:: TransformerEncoderLayer
    :members:

.. autoclass:: TransformerDecoderLayer
    :members:

.. autoclass:: Identity
    :members:

.. autoclass:: Linear
    :members:

.. autoclass:: Bilinear
    :members:

.. autoclass:: Dropout
    :members:

.. autoclass:: Dropout2d
    :members:

.. autoclass:: Dropout3d
    :members:

.. autoclass:: AlphaDropout
    :members:

.. autoclass:: Embedding
    :members:

.. autoclass:: EmbeddingBag
    :members:

.. autoclass:: CosineSimilarity
    :members:

.. autoclass:: PairwiseDistance
    :members:

.. autoclass:: L1Loss
    :members:

.. autoclass:: MSELoss
    :members:

.. autoclass:: CrossEntropyLoss
    :members:

.. autoclass:: CTCLoss
    :members:

.. autoclass:: NLLLoss
    :members:

.. autoclass:: PoissonNLLLoss
    :members:

.. autoclass:: KLDivLoss
    :members:

.. autoclass:: BCELoss
    :members:

.. autoclass:: BCEWithLogitsLoss
    :members:

.. autoclass:: MarginRankingLoss
    :members:

.. autoclass:: HingeEmbeddingLoss
    :members:

.. autoclass:: MultiLabelMarginLoss
    :members:

.. autoclass:: SmoothL1Loss
    :members:

.. autoclass:: SoftMarginLoss
    :members:

.. autoclass:: MultiLabelSoftMarginLoss
    :members:

.. autoclass:: CosineEmbeddingLoss
    :members:

.. autoclass:: MultiMarginLoss
    :members:

.. autoclass:: TripletMarginLoss
    :members:

.. autoclass:: PixelShuffle
    :members:

.. autoclass:: Upsample
    :members:

.. autoclass:: UpsamplingNearest2d
    :members:

.. autoclass:: UpsamplingBilinear2d
    :members:

.. autoclass:: DataParallel
    :members:

.. autoclass:: torch.nn.parallel.DistributedDataParallel
    :members:

.. autofunction:: torch.nn.utils.clip_grad_norm_
.. autofunction:: torch.nn.utils.clip_grad_value_
.. autofunction:: torch.nn.utils.parameters_to_vector
.. autofunction:: torch.nn.utils.vector_to_parameters
.. autofunction:: torch.nn.utils.weight_norm
.. autofunction:: torch.nn.utils.remove_weight_norm
.. autofunction:: torch.nn.utils.spectral_norm
.. autofunction:: torch.nn.utils.remove_spectral_norm
.. currentmodule:: torch.nn.utils.rnn
.. autoclass:: torch.nn.utils.rnn.PackedSequence
    :members:

.. autofunction:: torch.nn.utils.rnn.pack_padded_sequence
.. autofunction:: torch.nn.utils.rnn.pad_packed_sequence
.. autofunction:: torch.nn.utils.rnn.pad_sequence
.. autofunction:: torch.nn.utils.rnn.pack_sequence
.. currentmodule:: torch.nn
.. autoclass:: Flatten
    :members:

Quantization refers to techniques for performing computations and storing tensors at lower bitwidths than floating point precision. PyTorch supports both per-tensor and per-channel asymmetric linear quantization. To learn more about how to use quantized functions in PyTorch, please refer to the :ref:`quantization-doc` documentation.