Commit dc912bd: Merge branch 'master' into master

oarriaga authored Nov 13, 2024
2 parents 7be4e85 + e3938f1
Showing 146 changed files with 6,701 additions and 653 deletions.
30 changes: 11 additions & 19 deletions README.md
@@ -4,7 +4,7 @@
[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/keras-hub/issues)

> [!IMPORTANT]
-> 📢 KerasNLP is becoming KerasHub! 📢 Read
+> 📢 KerasNLP is now KerasHub! 📢 Read
> [the announcement](https://github.com/keras-team/keras-hub/issues/1831).
>
> We have renamed the repo to KerasHub in preparation for the release, but have not yet
@@ -26,7 +26,7 @@ All models support JAX, TensorFlow, and PyTorch from a single model
definition and can be fine-tuned on GPUs and TPUs out of the box. Models can
be trained on individual accelerators with built-in PEFT techniques, or
fine-tuned at scale with model and data parallel training. See our
-[Getting Started guide](https://keras.io/guides/keras_nlp/getting_started)
+[Getting Started guide](https://keras.io/guides/keras_hub/getting_started)
to start learning our API. Browse our models on
[Kaggle](https://www.kaggle.com/organizations/keras/models).
We welcome contributions.
@@ -35,9 +35,9 @@ We welcome contributions.

### For everyone

-- [Home Page](https://keras.io/keras_nlp)
-- [Developer Guides](https://keras.io/guides/keras_nlp)
-- [API Reference](https://keras.io/api/keras_nlp)
+- [Home Page](https://keras.io/keras_hub)
+- [Developer Guides](https://keras.io/guides/keras_hub)
+- [API Reference](https://keras.io/api/keras_hub)
- [Pre-trained Models](https://www.kaggle.com/organizations/keras/models)

### For contributors
@@ -56,7 +56,7 @@ Fine-tune a BERT classifier on IMDb movie reviews:
import os
os.environ["KERAS_BACKEND"] = "jax" # Or "tensorflow" or "torch"!

-import keras_nlp
+import keras_hub
import tensorflow_datasets as tfds

imdb_train, imdb_test = tfds.load(
@@ -67,7 +67,7 @@ imdb_train, imdb_test = tfds.load(
)

# Load a BERT model.
-classifier = keras_nlp.models.Classifier.from_preset(
+classifier = keras_hub.models.Classifier.from_preset(
"bert_base_en",
num_classes=2,
activation="softmax",
@@ -79,25 +79,17 @@ classifier.fit(imdb_train, validation_data=imdb_test)
classifier.predict(["What an amazing movie!", "A total waste of my time."])
```

-Try it out [in a colab](https://colab.research.google.com/gist/mattdangerw/e457e42d5ea827110c8d5cb4eb9d9a07/kerasnlp-quickstart.ipynb).
+Try it out [in a colab](https://colab.research.google.com/drive/1gSWkh3yOLwmKAaNh2dQQ6kQIlnGte7P2?usp=sharing).
For more in-depth guides and examples, visit
-[keras.io/keras_nlp](https://keras.io/keras_nlp/).
+[keras.io/keras_hub](https://keras.io/keras_hub/).

## Installation

-KerasHub is currently in pre-release. Note that pre-release versions may
-introduce breaking changes to the API in future versions. For a stable and
-supported experience, we recommend installing `keras-nlp` version 0.15.1:
-
-```bash
-pip install keras-nlp==0.15.1
-```
-
-To try out the latest pre-release version of KerasHub, you can use
+To try out the latest version of KerasHub, you can use
our nightly package:

```bash
-pip install keras-hub-nightly
+pip install keras-hub
```

KerasHub currently requires TensorFlow to be installed for use of the
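As a quick post-install sanity check, the following minimal sketch assumes a backend such as JAX is already installed alongside `keras-hub` (backend selection as in the quickstart above), and that the package exposes `__version__`:

```python
import os
os.environ["KERAS_BACKEND"] = "jax"  # or "tensorflow" / "torch"

import keras_hub
print(keras_hub.__version__)  # assumed attribute; a simple import smoke test
```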
Binary file added keras_hub/.DS_Store
Binary file not shown.
13 changes: 13 additions & 0 deletions keras_hub/api/layers/__init__.py
@@ -34,21 +34,34 @@
from keras_hub.src.layers.preprocessing.random_deletion import RandomDeletion
from keras_hub.src.layers.preprocessing.random_swap import RandomSwap
from keras_hub.src.layers.preprocessing.start_end_packer import StartEndPacker
+from keras_hub.src.models.clip.clip_image_converter import CLIPImageConverter
from keras_hub.src.models.deeplab_v3.deeplab_v3_image_converter import (
DeepLabV3ImageConverter,
)
from keras_hub.src.models.densenet.densenet_image_converter import (
DenseNetImageConverter,
)
+from keras_hub.src.models.efficientnet.efficientnet_image_converter import (
+EfficientNetImageConverter,
+)
+from keras_hub.src.models.mit.mit_image_converter import MiTImageConverter
from keras_hub.src.models.pali_gemma.pali_gemma_image_converter import (
PaliGemmaImageConverter,
)
from keras_hub.src.models.resnet.resnet_image_converter import (
ResNetImageConverter,
)
+from keras_hub.src.models.retinanet.anchor_generator import AnchorGenerator
+from keras_hub.src.models.retinanet.retinanet_image_converter import (
+RetinaNetImageConverter,
+)
from keras_hub.src.models.sam.sam_image_converter import SAMImageConverter
from keras_hub.src.models.sam.sam_mask_decoder import SAMMaskDecoder
from keras_hub.src.models.sam.sam_prompt_encoder import SAMPromptEncoder
+from keras_hub.src.models.segformer.segformer_image_converter import (
+SegFormerImageConverter,
+)
+from keras_hub.src.models.vgg.vgg_image_converter import VGGImageConverter
from keras_hub.src.models.whisper.whisper_audio_converter import (
WhisperAudioConverter,
)
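The image converters exported above are preprocessing layers that resize and rescale raw images into the format a given backbone expects. A hedged sketch of typical usage follows; the preset name is hypothetical and the exact signature should be checked against the KerasHub docs:

```python
import numpy as np
import keras_hub

# Converters load their preprocessing config via `from_preset`, mirroring
# the models' API. The preset name below is hypothetical.
converter = keras_hub.layers.ResNetImageConverter.from_preset(
    "resnet_50_imagenet"
)
images = np.random.uniform(0, 255, size=(2, 512, 512, 3)).astype("float32")
model_inputs = converter(images)  # resized/rescaled for the backbone
```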
39 changes: 34 additions & 5 deletions keras_hub/api/models/__init__.py
@@ -54,8 +54,11 @@
from keras_hub.src.models.bloom.bloom_tokenizer import BloomTokenizer
from keras_hub.src.models.causal_lm import CausalLM
from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor
+from keras_hub.src.models.clip.clip_backbone import CLIPBackbone
from keras_hub.src.models.clip.clip_preprocessor import CLIPPreprocessor
+from keras_hub.src.models.clip.clip_text_encoder import CLIPTextEncoder
from keras_hub.src.models.clip.clip_tokenizer import CLIPTokenizer
+from keras_hub.src.models.clip.clip_vision_encoder import CLIPVisionEncoder
from keras_hub.src.models.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
@@ -129,6 +132,12 @@
from keras_hub.src.models.efficientnet.efficientnet_backbone import (
EfficientNetBackbone,
)
+from keras_hub.src.models.efficientnet.efficientnet_image_classifier import (
+EfficientNetImageClassifier,
+)
+from keras_hub.src.models.efficientnet.efficientnet_image_classifier_preprocessor import (
+EfficientNetImageClassifierPreprocessor,
+)
from keras_hub.src.models.electra.electra_backbone import ElectraBackbone
from keras_hub.src.models.electra.electra_tokenizer import ElectraTokenizer
from keras_hub.src.models.f_net.f_net_backbone import FNetBackbone
@@ -177,6 +186,10 @@
from keras_hub.src.models.image_classifier_preprocessor import (
ImageClassifierPreprocessor,
)
+from keras_hub.src.models.image_object_detector import ImageObjectDetector
+from keras_hub.src.models.image_object_detector_preprocessor import (
+ImageObjectDetectorPreprocessor,
+)
from keras_hub.src.models.image_segmenter import ImageSegmenter
from keras_hub.src.models.image_segmenter_preprocessor import (
ImageSegmenterPreprocessor,
@@ -203,11 +216,10 @@
MistralCausalLMPreprocessor,
)
from keras_hub.src.models.mistral.mistral_tokenizer import MistralTokenizer
-from keras_hub.src.models.mix_transformer.mix_transformer_backbone import (
-MiTBackbone,
-)
-from keras_hub.src.models.mix_transformer.mix_transformer_classifier import (
-MiTImageClassifier,
+from keras_hub.src.models.mit.mit_backbone import MiTBackbone
+from keras_hub.src.models.mit.mit_image_classifier import MiTImageClassifier
+from keras_hub.src.models.mit.mit_image_classifier_preprocessor import (
+MiTImageClassifierPreprocessor,
)
from keras_hub.src.models.mobilenet.mobilenet_backbone import MobileNetBackbone
from keras_hub.src.models.mobilenet.mobilenet_image_classifier import (
@@ -246,6 +258,13 @@
from keras_hub.src.models.resnet.resnet_image_classifier_preprocessor import (
ResNetImageClassifierPreprocessor,
)
+from keras_hub.src.models.retinanet.retinanet_backbone import RetinaNetBackbone
+from keras_hub.src.models.retinanet.retinanet_object_detector import (
+RetinaNetObjectDetector,
+)
+from keras_hub.src.models.retinanet.retinanet_object_detector_preprocessor import (
+RetinaNetObjectDetectorPreprocessor,
+)
from keras_hub.src.models.roberta.roberta_backbone import RobertaBackbone
from keras_hub.src.models.roberta.roberta_masked_lm import RobertaMaskedLM
from keras_hub.src.models.roberta.roberta_masked_lm_preprocessor import (
@@ -269,6 +288,13 @@
from keras_hub.src.models.sam.sam_image_segmenter_preprocessor import (
SAMImageSegmenterPreprocessor,
)
+from keras_hub.src.models.segformer.segformer_backbone import SegFormerBackbone
+from keras_hub.src.models.segformer.segformer_image_segmenter import (
+SegFormerImageSegmenter,
+)
+from keras_hub.src.models.segformer.segformer_image_segmenter_preprocessor import (
+SegFormerImageSegmenterPreprocessor,
+)
from keras_hub.src.models.seq_2_seq_lm import Seq2SeqLM
from keras_hub.src.models.seq_2_seq_lm_preprocessor import Seq2SeqLMPreprocessor
from keras_hub.src.models.stable_diffusion_3.stable_diffusion_3_backbone import (
@@ -298,6 +324,9 @@
from keras_hub.src.models.text_to_image import TextToImage
from keras_hub.src.models.vgg.vgg_backbone import VGGBackbone
from keras_hub.src.models.vgg.vgg_image_classifier import VGGImageClassifier
+from keras_hub.src.models.vgg.vgg_image_classifier_preprocessor import (
+VGGImageClassifierPreprocessor,
+)
from keras_hub.src.models.vit_det.vit_det_backbone import ViTDetBackbone
from keras_hub.src.models.whisper.whisper_backbone import WhisperBackbone
from keras_hub.src.models.whisper.whisper_tokenizer import WhisperTokenizer
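The newly exported detection and segmentation tasks presumably follow the same `from_preset` pattern as the README quickstart above. A hedged sketch, with a hypothetical preset name and an assumed output structure:

```python
import numpy as np
import keras_hub

# Hypothetical preset name; real presets are listed on the Kaggle models page.
detector = keras_hub.models.ImageObjectDetector.from_preset(
    "retinanet_resnet50_fpn_coco"
)
images = np.random.uniform(0, 255, size=(1, 640, 640, 3)).astype("float32")
predictions = detector.predict(images)  # assumed: boxes, classes, scores
```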
2 changes: 1 addition & 1 deletion keras_hub/src/bounding_box/__init__.py
@@ -17,4 +17,4 @@
from keras_hub.src.bounding_box.utils import as_relative
from keras_hub.src.bounding_box.utils import clip_to_image
from keras_hub.src.bounding_box.utils import is_relative
-from keras_hub.src.bounding_box.validate_format import validate_format
\ No newline at end of file
+from keras_hub.src.bounding_box.validate_format import validate_format
114 changes: 102 additions & 12 deletions keras_hub/src/bounding_box/converters.py
@@ -20,29 +20,74 @@ class RequiresImagesException(Exception):
ALL_AXES = 4


-def _encode_box_to_deltas(
+def encode_box_to_deltas(
anchors,
boxes,
-anchor_format: str,
-box_format: str,
+anchor_format,
+box_format,
+encoding_format="center_yxhw",
variance=None,
image_shape=None,
):
"""Converts bounding_boxes from `center_yxhw` to delta format."""
"""Encodes bounding boxes relative to anchors as deltas.
This function calculates the deltas that represent the difference between
bounding boxes and provided anchors. Deltas encode the offsets and scaling
factors to apply to anchors to obtain the target boxes.
Boxes and anchors are first converted to the specified `encoding_format`
(defaulting to `center_yxhw`) for consistent delta representation.
Args:
anchors: `Tensors`. Anchor boxes with shape of `(N, 4)` where N is the
number of anchors.
boxes: `Tensors` Bounding boxes to encode. Boxes can be of shape
`(B, N, 4)` or `(N, 4)`.
anchor_format: str. The format of the input `anchors`
(e.g., "xyxy", "xywh", etc.).
box_format: str. The format of the input `boxes`
(e.g., "xyxy", "xywh", etc.).
encoding_format: str. The intermediate format to which boxes and anchors
are converted before delta calculation. Defaults to "center_yxhw".
variance: `List[float]`. A 4-element array/tensor representing variance
factors to scale the box deltas. If provided, the calculated deltas
are divided by the variance. Defaults to None.
image_shape: `Tuple[int]`. The shape of the image (height, width, 3).
When using relative bounding box format for `box_format` the
`image_shape` is used for normalization.
Returns:
Encoded box deltas. The return type matches the `encode_format`.
Raises:
ValueError: If `variance` is not None and its length is not 4.
ValueError: If `encoding_format` is not `"center_xywh"` or
`"center_yxhw"`.
"""
if variance is not None:
variance = ops.convert_to_tensor(variance, "float32")
var_len = variance.shape[-1]

if var_len != 4:
raise ValueError(f"`variance` must be length 4, got {variance}")

+if encoding_format not in ["center_xywh", "center_yxhw"]:
+raise ValueError(
+"`encoding_format` should be one of 'center_xywh' or 'center_yxhw', "
+f"got {encoding_format}"
+)

encoded_anchors = convert_format(
anchors,
source=anchor_format,
target="center_yxhw",
target=encoding_format,
image_shape=image_shape,
)
boxes = convert_format(
-boxes, source=box_format, target="center_yxhw", image_shape=image_shape
+boxes,
+source=box_format,
+target=encoding_format,
+image_shape=image_shape,
)
anchor_dimensions = ops.maximum(
encoded_anchors[..., 2:], keras.backend.epsilon()
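The delta computation itself falls in the collapsed lines between these two hunks. As a reading aid, here is a minimal NumPy sketch of the standard center-offset parameterization the docstring describes (deltas divided by `variance` on encode); an illustration under those assumptions, not the library's exact code:

```python
import numpy as np

def encode_deltas_sketch(anchors_yxhw, boxes_yxhw, variance=None, eps=1e-7):
    # Clamp anchor (height, width) away from zero, as the code above does
    # with keras.backend.epsilon().
    anchor_dims = np.maximum(anchors_yxhw[..., 2:], eps)
    # Center offsets, scaled by anchor size.
    center_deltas = (boxes_yxhw[..., :2] - anchors_yxhw[..., :2]) / anchor_dims
    # Log-ratios of box size to anchor size.
    size_deltas = np.log(boxes_yxhw[..., 2:] / anchor_dims)
    deltas = np.concatenate([center_deltas, size_deltas], axis=-1)
    if variance is not None:
        # Per the docstring: deltas are divided by the variance when encoding.
        deltas = deltas / np.asarray(variance, dtype="float32")
    return deltas

anchors = np.array([[10.0, 10.0, 20.0, 20.0]])  # one anchor, center_yxhw
boxes = np.array([[12.0, 11.0, 24.0, 18.0]])
print(encode_deltas_sketch(anchors, boxes, variance=[0.1, 0.1, 0.2, 0.2]))
```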
@@ -61,27 +106,72 @@ def _encode_box_to_deltas(
return boxes_delta


-def _decode_deltas_to_boxes(
+def decode_deltas_to_boxes(
anchors,
boxes_delta,
-anchor_format: str,
-box_format: str,
+anchor_format,
+box_format,
+encoded_format="center_yxhw",
variance=None,
image_shape=None,
):
"""Converts bounding_boxes from delta format to `center_yxhw`."""
"""Converts bounding boxes from delta format to the specified `box_format`.
This function decodes bounding box deltas relative to anchors to obtain the
final bounding box coordinates. The boxes are encoded in a specific
`encoded_format` (center_yxhw by default) during the decoding process.
This allows flexibility in how the deltas are applied to the anchors.
Args:
anchors: Can be `Tensors` or `Dict[Tensors]` where keys are level
indices and values are corresponding anchor boxes.
The shape of the array/tensor should be `(N, 4)` where N is the
number of anchors.
boxes_delta Can be `Tensors` or `Dict[Tensors]` Bounding box deltas
must have the same type and structure as `anchors`. The
shape of the array/tensor can be `(N, 4)` or `(B, N, 4)` where N is
the number of boxes.
anchor_format: str. The format of the input `anchors`.
(e.g., `"xyxy"`, `"xywh"`, etc.)
box_format: str. The desired format for the output boxes.
(e.g., `"xyxy"`, `"xywh"`, etc.)
encoded_format: str. Raw output format from regression head. Defaults
to `"center_yxhw"`.
variance: `List[floats]`. A 4-element array/tensor representing
variance factors to scale the box deltas. If provided, the deltas
are multiplied by the variance before being applied to the anchors.
Defaults to None.
image_shape: The shape of the image (height, width). This is needed
if normalization to image size is required when converting between
formats. Defaults to None.
Returns:
Decoded box coordinates. The return type matches the `box_format`.
Raises:
ValueError: If `variance` is not None and its length is not 4.
ValueError: If `encoded_format` is not `"center_xywh"` or
`"center_yxhw"`.
"""
if variance is not None:
variance = ops.convert_to_tensor(variance, "float32")
var_len = variance.shape[-1]

if var_len != 4:
raise ValueError(f"`variance` must be length 4, got {variance}")

+if encoded_format not in ["center_xywh", "center_yxhw"]:
+raise ValueError(
+f"`encoded_format` should be 'center_xywh' or 'center_yxhw', "
+f"but got '{encoded_format}'."
+)

def decode_single_level(anchor, box_delta):
encoded_anchor = convert_format(
anchor,
source=anchor_format,
target="center_yxhw",
target=encoded_format,
image_shape=image_shape,
)
if variance is not None:
@@ -97,7 +187,7 @@ def decode_single_level(anchor, box_delta):
)
box = convert_format(
box,
source="center_yxhw",
source=encoded_format,
target=box_format,
image_shape=image_shape,
)
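Taken together, the two helpers are inverses: decoding with the same anchors, formats, and variance should recover the original boxes up to floating-point error. A hedged round-trip sketch based only on the signatures shown above (the `"xyxy"` format name appears in the docstrings; that NumPy inputs are accepted directly is an assumption):

```python
import numpy as np
from keras_hub.src.bounding_box.converters import (
    decode_deltas_to_boxes,
    encode_box_to_deltas,
)

anchors = np.array([[0.0, 0.0, 32.0, 32.0], [16.0, 16.0, 64.0, 64.0]])
boxes = np.array([[2.0, 3.0, 30.0, 28.0], [20.0, 18.0, 60.0, 70.0]])

deltas = encode_box_to_deltas(
    anchors,
    boxes,
    anchor_format="xyxy",
    box_format="xyxy",
    encoding_format="center_yxhw",
    variance=[0.1, 0.1, 0.2, 0.2],
)
# Same anchors, formats, and variance on the way back.
decoded = decode_deltas_to_boxes(
    anchors,
    deltas,
    anchor_format="xyxy",
    box_format="xyxy",
    encoded_format="center_yxhw",
    variance=[0.1, 0.1, 0.2, 0.2],
)
```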