
tensorflow: Add missing members to the tensorflow.keras.layers module. #11333

Merged 27 commits on Mar 13, 2024. The diff below shows changes from 21 of the 27 commits.

Commits:
ca822c2 Add some missing keras layers (hoel-bagard, Jan 28, 2024)
6e26f9b Move some layers to keras.layers.preprocessing (hoel-bagard, Jan 28, 2024)
35056a7 fix: add MultiHeadAttention's __call__ to the stubtest allowlist (hoel-bagard, Jan 28, 2024)
6e83aa6 remove MultiHeadAttention... (hoel-bagard, Jan 31, 2024)
cd8632a Revert "remove MultiHeadAttention..." (hoel-bagard, Feb 1, 2024)
755b0ab type ignore the override (hoel-bagard, Feb 1, 2024)
d543957 Merge branch 'main' into hoel/add_tf_keras_layers (hoel-bagard, Feb 4, 2024)
9eda7f3 try to fix mypy crash. (hoel-bagard, Feb 4, 2024)
2c41a03 add modules to allowlist due to cursed imports. (hoel-bagard, Feb 5, 2024)
4bf4ad8 Merge branch 'main' into hoel/add_tf_keras_layers (hoel-bagard, Feb 8, 2024)
976b9c8 remove tensorflow.keras.layers.MultiHeadAttention.__call__ from allow… (hoel-bagard, Feb 8, 2024)
37f1b7c Merge branch 'main' into hoel/add_tf_keras_layers (hoel-bagard, Feb 17, 2024)
a7739fb test (hoel-bagard, Feb 17, 2024)
f5f74c2 Merge branch 'main' into hoel/add_tf_keras_layers (JelleZijlstra, Feb 17, 2024)
03c3e9b Revert "test" (hoel-bagard, Feb 17, 2024)
aa05ac7 Merge branch 'main' into hoel/add_tf_keras_layers (JelleZijlstra, Feb 17, 2024)
7d0343f fix: tuple -> Iterable (hoel-bagard, Feb 17, 2024)
37c668b add PreprocessingLayer methods/overloads (hoel-bagard, Feb 17, 2024)
4228893 fix PreprocessingLayer typing (hoel-bagard, Feb 17, 2024)
31134a1 make IndexLookup private (hoel-bagard, Feb 17, 2024)
aaa3738 silence/ignore mypy error. (hoel-bagard, Feb 17, 2024)
ac3b558 fix: make PreprocessingLayer's is_adapted into a property. (hoel-bagard, Feb 17, 2024)
bc7926a Merge branch 'main' into hoel/add_tf_keras_layers (JelleZijlstra, Feb 17, 2024)
05ce2ed Merge branch 'main' into hoel/add_tf_keras_layers (rchen152, Feb 27, 2024)
189e918 Merge branch 'main' into hoel/add_tf_keras_layers (hoel-bagard, Mar 12, 2024)
849279e try to fix pytype issue (hoel-bagard, Mar 13, 2024)
3d1cfbc merge with main (hoel-bagard, Mar 13, 2024)
3 changes: 3 additions & 0 deletions stubs/tensorflow/@tests/stubtest_allowlist.txt
@@ -47,6 +47,9 @@ tensorflow._aliases
# but the real module file is completely different name (even package) and dynamically handled.
# tf.initializers at runtime is <module 'keras.api._v2.keras.initializers' from '...'>
tensorflow.initializers
# Other cursed import magic similar to the one above.
tensorflow.keras.layers.preprocessing
tensorflow.keras.layers.preprocessing.index_lookup
# Another cursed import magic similar to the one above.
tensorflow.distribute.experimental.coordinator

3 changes: 3 additions & 0 deletions stubs/tensorflow/tensorflow/_aliases.pyi
@@ -27,10 +27,13 @@ class KerasSerializable2(Protocol):

KerasSerializable: TypeAlias = KerasSerializable1 | KerasSerializable2

Integer: TypeAlias = tf.Tensor | int | IntArray | np.number[Any]
Float: TypeAlias = Integer | float | FloatArray
Slice: TypeAlias = int | slice | None
FloatDataSequence: TypeAlias = Sequence[float] | Sequence[FloatDataSequence]
IntDataSequence: TypeAlias = Sequence[int] | Sequence[IntDataSequence]
StrDataSequence: TypeAlias = Sequence[str] | Sequence[StrDataSequence]
DataSequence: TypeAlias = FloatDataSequence | StrDataSequence | IntDataSequence
ScalarTensorCompatible: TypeAlias = tf.Tensor | str | float | np.ndarray[Any, Any] | np.number[Any]
UIntTensorCompatible: TypeAlias = tf.Tensor | int | UIntArray
StringTensorCompatible: TypeAlias = tf.Tensor | str | npt.NDArray[np.str_] | Sequence[StringTensorCompatible]
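The recursive *DataSequence aliases added above let arbitrarily nested Python sequences type-check. A minimal standalone sketch of the pattern (the function name is invented for the demo, not code from this diff):

```python
# Minimal sketch of the recursive-alias pattern used in _aliases.pyi.
# The alias matches a flat Sequence[float] as well as sequences of such
# sequences, so nesting of any depth is accepted by a type checker.
from collections.abc import Sequence
from typing import TypeAlias

FloatDataSequence: TypeAlias = Sequence[float] | Sequence["FloatDataSequence"]

def adapt_to(data: FloatDataSequence) -> None:
    """Accept flat or arbitrarily nested sequences of floats (demo only)."""

adapt_to([1.0, 2.0])           # OK: Sequence[float]
adapt_to([[1.0], [2.0, 3.0]])  # OK: Sequence[FloatDataSequence]
```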
@@ -1,15 +1,17 @@
from _typeshed import Incomplete
from collections.abc import Callable, Iterable, Sequence
from typing import Any, Generic, TypeVar, overload
from collections.abc import Callable, Iterable, Mapping, Sequence
from typing import Any, Generic, Literal, TypeVar, overload
from typing_extensions import Self, TypeAlias

import tensorflow as tf
from tensorflow import Tensor, Variable, VariableAggregation, VariableSynchronization
from tensorflow._aliases import AnyArray, DTypeLike, TensorCompatible
from tensorflow._aliases import AnyArray, DTypeLike, TensorCompatible, TensorLike
from tensorflow.keras.activations import _Activation
from tensorflow.keras.constraints import Constraint
from tensorflow.keras.initializers import _Initializer
from tensorflow.keras.regularizers import _Regularizer
from tensorflow.keras.layers.preprocessing import IntegerLookup as IntegerLookup, StringLookup as StringLookup
from tensorflow.keras.regularizers import Regularizer, _Regularizer
from tensorflow.python.feature_column.feature_column_v2 import DenseColumn, SequenceDenseColumn

_InputT = TypeVar("_InputT", contravariant=True)
_OutputT = TypeVar("_OutputT", covariant=True)
@@ -127,7 +129,7 @@ class Dense(Layer[tf.Tensor, tf.Tensor]):
kernel_constraint: _Constraint = None,
bias_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype = None,
Review thread on this line:

hmc-cs-mdrissi (Contributor), Feb 17, 2024:
One optional improvement (feel free to leave it out of scope): there's a comment near here noting that some of these signatures can be simplified with Unpack and a TypedDict (PEP 692). The upstream stubs actually use that technique, which saves repeating dtype/trainable/name/dynamic/etc. I see that mypy is checked in this issue.
Can we start using PEP 692 in typeshed now (has it been used yet?) @JelleZijlstra

Reply from a Member:
Still waiting for pytype, as you can see in #9710
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
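For reference, a hedged sketch of the PEP 692 technique the review thread above mentions; the TypedDict name and its simplified field types are illustrative, not the actual upstream definition:

```python
# Sketch only: with PEP 692, the trailing keyword arguments repeated by
# every layer (trainable/dtype/dynamic/name) can live in one TypedDict.
# _LayerKwargs and the simplified dtype type here are invented for the demo.
from typing import TypedDict
from typing_extensions import Unpack

class _LayerKwargs(TypedDict, total=False):
    trainable: bool
    dtype: str | None
    dynamic: bool
    name: str | None

class Dense:
    def __init__(self, units: int, **kwargs: Unpack[_LayerKwargs]) -> None: ...

class Dropout:
    def __init__(self, rate: float, **kwargs: Unpack[_LayerKwargs]) -> None: ...
```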
@@ -149,7 +151,7 @@ class BatchNormalization(Layer[tf.Tensor, tf.Tensor]):
beta_constraint: _Constraint = None,
gamma_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
@@ -161,7 +163,7 @@ class ReLU(Layer[tf.Tensor, tf.Tensor]):
negative_slope: float | None = 0.0,
threshold: float | None = 0.0,
trainable: bool = True,
dtype: _LayerDtype = None,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
@@ -173,7 +175,7 @@ class Dropout(Layer[tf.Tensor, tf.Tensor]):
noise_shape: TensorCompatible | Sequence[int | None] | None = None,
seed: int | None = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
@@ -189,6 +191,133 @@ class Embedding(Layer[tf.Tensor, tf.Tensor]):
mask_zero: bool = False,
input_length: int | None = None,
trainable: bool = True,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...

class Conv2D(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
filters: int,
kernel_size: int | Iterable[int],
strides: int | Iterable[int] = (1, 1),
padding: Literal["valid", "same"] = "valid",
data_format: None | Literal["channels_last", "channels_first"] = None,
dilation_rate: int | Iterable[int] = (1, 1),
groups: int = 1,
activation: _Activation = None,
use_bias: bool = True,
kernel_initializer: _Initializer = "glorot_uniform",
bias_initializer: _Initializer = "zeros",
kernel_regularizer: _Regularizer = None,
bias_regularizer: _Regularizer = None,
activity_regularizer: _Regularizer = None,
kernel_constraint: _Constraint = None,
bias_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...

class Identity(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self, trainable: bool = True, dtype: _LayerDtype = None, dynamic: bool = False, name: str | None = None
) -> None: ...

class LayerNormalization(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
axis: int = -1,
epsilon: float = 0.001,
center: bool = True,
scale: bool = True,
beta_initializer: _Initializer = "zeros",
gamma_initializer: _Initializer = "ones",
beta_regularizer: _Regularizer = None,
gamma_regularizer: _Regularizer = None,
beta_constraint: _Constraint = None,
gamma_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...

class DenseFeatures(Layer[Mapping[str, TensorLike], tf.Tensor]):
def __init__(
self,
feature_columns: Sequence[DenseColumn | SequenceDenseColumn],
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...

class MultiHeadAttention(Layer[Any, tf.Tensor]):
def __init__(
self,
num_heads: int,
key_dim: int | None,
value_dim: int | None = None,
dropout: float = 0.0,
use_bias: bool = True,
output_shape: tuple[int, ...] | None = None,
attention_axes: tuple[int, ...] | None = None,
kernel_initializer: _Initializer = "glorot_uniform",
bias_initializer: _Initializer = "zeros",
kernel_regularizer: Regularizer | None = None,
bias_regularizer: _Regularizer | None = None,
activity_regularizer: _Regularizer | None = None,
kernel_constraint: _Constraint | None = None,
bias_constraint: _Constraint | None = None,
trainable: bool = True,
dtype: _LayerDtype | None = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
# @override
@overload # type: ignore
def __call__(
self,
query: tf.Tensor,
value: tf.Tensor,
key: tf.Tensor | None,
attention_mask: tf.Tensor | None,
return_attention_scores: Literal[False],
training: bool,
use_causal_mask: bool,
) -> tf.Tensor: ...
@overload
def __call__(
self,
query: tf.Tensor,
value: tf.Tensor,
key: tf.Tensor | None,
attention_mask: tf.Tensor | None,
return_attention_scores: Literal[True],
training: bool,
use_causal_mask: bool,
) -> tuple[tf.Tensor, tf.Tensor]: ...
@overload
def __call__(
self,
query: tf.Tensor,
value: tf.Tensor,
key: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
return_attention_scores: bool = False,
training: bool = False,
use_causal_mask: bool = False,
) -> tuple[tf.Tensor, tf.Tensor] | tf.Tensor: ...

class GaussianDropout(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
rate: float,
seed: int | None = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
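As a usage sketch (not part of the diff), here is how the MultiHeadAttention overloads above narrow the return type for a type checker; the shapes and values are arbitrary dummies:

```python
# Sketch: the Literal[...] overloads on return_attention_scores let a
# checker pick the precise return type. All values here are dummies.
import tensorflow as tf
from tensorflow.keras.layers import MultiHeadAttention

mha = MultiHeadAttention(num_heads=2, key_dim=8)
q = tf.random.normal([1, 4, 16])
v = tf.random.normal([1, 4, 16])

# Catch-all overload (all optional arguments defaulted): the union type.
out = mha(q, v)  # tf.Tensor | tuple[tf.Tensor, tf.Tensor]

# Literal[False] overload: a plain tensor.
t = mha(q, v, None, None, False, False, False)  # tf.Tensor

# Literal[True] overload: an (output, attention_scores) pair.
pair = mha(q, v, None, None, True, False, False)  # tuple[tf.Tensor, tf.Tensor]
```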
@@ -0,0 +1,26 @@
import abc
from typing import overload

import tensorflow as tf
from tensorflow._aliases import AnyArray, DataSequence, Float, Integer, TensorCompatible, TensorLike
from tensorflow.keras.layers import Layer

class PreprocessingLayer(Layer[TensorLike, TensorLike], metaclass=abc.ABCMeta):
is_adapted: bool
@overload # type: ignore
def __call__(self, inputs: tf.Tensor, *, training: bool = False, mask: TensorCompatible | None = None) -> tf.Tensor: ...
@overload
def __call__(
self, inputs: tf.SparseTensor, *, training: bool = False, mask: TensorCompatible | None = None
) -> tf.SparseTensor: ...
@overload
def __call__(
self, inputs: tf.RaggedTensor, *, training: bool = False, mask: TensorCompatible | None = None
) -> tf.RaggedTensor: ...
def adapt(
self,
data: tf.data.Dataset[TensorLike] | AnyArray | DataSequence,
batch_size: Integer | None = None,
steps: Float | None = None,
) -> None: ...
def compile(self, run_eagerly: bool | None = None, steps_per_execution: Integer | None = None) -> None: ...
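A short usage sketch of the adapt()/__call__ contract stubbed above, using Normalization as a concrete preprocessing layer that exists at runtime (Normalization itself is not part of this diff):

```python
# Sketch: adapt() learns state from data, then __call__ transforms inputs;
# per the first overload above, a tf.Tensor input yields a tf.Tensor.
import numpy as np
import tensorflow as tf

layer = tf.keras.layers.Normalization()
layer.adapt(np.array([[1.0], [2.0], [3.0]]))  # an AnyArray is accepted as data

y = layer(tf.constant([[2.0]]))  # tf.Tensor in -> tf.Tensor out
```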
@@ -0,0 +1,36 @@
from typing import Literal

from tensorflow._aliases import TensorCompatible
from tensorflow.keras.layers.preprocessing.index_lookup import _IndexLookup

class StringLookup(_IndexLookup):
def __init__(
self,
max_tokens: int | None = None,
num_oov_indices: int = 1,
mask_token: str | None = None,
oov_token: str = "[UNK]",
vocabulary: str | None | TensorCompatible = None,
idf_weights: TensorCompatible | None = None,
encoding: str = "utf-8",
invert: bool = False,
output_mode: Literal["int", "count", "multi_hot", "one_hot", "tf_idf"] = "int",
sparse: bool = False,
pad_to_max_tokens: bool = False,
) -> None: ...

class IntegerLookup(_IndexLookup):
def __init__(
self,
max_tokens: int | None = None,
num_oov_indices: int = 1,
mask_token: int | None = None,
oov_token: int = -1,
vocabulary: str | None | TensorCompatible = None,
vocabulary_dtype: Literal["int64", "int32"] = "int64",
idf_weights: TensorCompatible | None = None,
invert: bool = False,
output_mode: Literal["int", "count", "multi_hot", "one_hot", "tf_idf"] = "int",
sparse: bool = False,
pad_to_max_tokens: bool = False,
) -> None: ...
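And a brief sketch of the StringLookup behavior these stubs describe; the output values follow from the defaults shown above:

```python
# Sketch: StringLookup maps strings to integer indices. With the default
# num_oov_indices=1, index 0 is reserved for out-of-vocabulary tokens.
import tensorflow as tf

lookup = tf.keras.layers.StringLookup(vocabulary=["a", "b", "c"])
ids = lookup(tf.constant([["a", "c", "z"]]))
# ids == [[1, 3, 0]]: "a" and "c" are offset by the OOV slot; "z" -> 0.
```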
@@ -0,0 +1,9 @@
from _typeshed import Incomplete

import tensorflow as tf
from tensorflow.keras.layers.experimental.preprocessing import PreprocessingLayer

class _IndexLookup(PreprocessingLayer):
def compute_output_signature(self, input_spec: Incomplete) -> tf.TensorSpec: ...
def get_vocabulary(self, include_special_tokens: bool = True) -> list[Incomplete]: ...
def vocabulary_size(self) -> int: ...