| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 5 - 45.8k | stringlengths 18 - 93 | dict | int64 0 - 33 |
sudo pip install --upgrade pip
sudo pip install -r requirements.txt --progress-bar off
sudo pip install -e ".[tests]"
sudo apt update
sudo apt install -y clang-format
| keras-cv/.devcontainer/setup.sh/0 | {
"file_path": "keras-cv/.devcontainer/setup.sh",
"repo_id": "keras-cv",
"token_count": 46
} | 0 |
import math
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import keras_cv
from keras_cv.metrics import coco
def produce_random_data(
include_confidence=False, num_images=128, num_classes=20
):
"""Generates a fake list of bounding boxes for use in this test.
Returns:
a tensor list of size [128, 25, 5/6]. This represents 128 images, 25
bboxes and 5/6 dimensions to represent each bbox, depending on whether
confidence is included.
"""
images = []
for _ in range(num_images):
num_boxes = math.floor(25 * random.uniform(0, 1))
classes_in_image = np.floor(np.random.rand(num_boxes, 1) * num_classes)
bboxes = np.random.rand(num_boxes, 4)
boxes = np.concatenate([bboxes, classes_in_image], axis=-1)
if include_confidence:
confidence = np.random.rand(num_boxes, 1)
boxes = np.concatenate([boxes, confidence], axis=-1)
images.append(
keras_cv.utils.bounding_box.xywh_to_corners(
tf.constant(boxes, dtype=tf.float32)
)
)
images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
return tf.stack(images, axis=0)
y_true = produce_random_data()
y_pred = produce_random_data(include_confidence=True)
class_ids = list(range(20))
n_images = [128, 256, 512, 512 + 256, 1024]
update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []
for images in n_images:
y_true = produce_random_data(num_images=images)
y_pred = produce_random_data(num_images=images, include_confidence=True)
metric = coco._COCOMeanAveragePrecision(class_ids)
# warm up
metric.update_state(y_true, y_pred)
metric.result()
start = time.time()
metric.update_state(y_true, y_pred)
update_state_done = time.time()
r = metric.result()
end = time.time()
update_state_runtimes.append(update_state_done - start)
result_runtimes.append(end - update_state_done)
end_to_end_runtimes.append(end - start)
print("end_to_end_runtimes", end_to_end_runtimes)
data = pd.DataFrame(
{
"n_images": n_images,
"update_state_runtimes": update_state_runtimes,
"result_runtimes": result_runtimes,
"end_to_end_runtimes": end_to_end_runtimes,
}
)
sns.lineplot(data=data, x="n_images", y="update_state_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()
sns.lineplot(data=data, x="n_images", y="result_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()
sns.lineplot(data=data, x="n_images", y="end_to_end_runtimes")
plt.xlabel("Number of Images")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
| keras-cv/benchmarks/metrics/coco/mean_average_precision_performance.py/0 | {
"file_path": "keras-cv/benchmarks/metrics/coco/mean_average_precision_performance.py",
"repo_id": "keras-cv",
"token_count": 1229
} | 1 |
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow import keras
from keras_cv.layers import BaseImageAugmentationLayer
from keras_cv.layers import RandomSharpness
from keras_cv.utils import preprocessing
class OldRandomSharpness(BaseImageAugmentationLayer):
"""Randomly performs the sharpness operation on given images.
The sharpness operation first performs a blur operation, then blends between
the original image and the blurred image. This operation makes the edges of
an image less sharp than they were in the original image.
References:
- [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html)
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image sharpness is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the sharpened result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the sharpened image. Values should be
between `0.0` and `1.0`. If a tuple is used, a `factor` is sampled
between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In
order to ensure the value is always the same, please pass a tuple
with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
""" # noqa: E501
def __init__(
self,
factor,
value_range,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.factor = preprocessing.parse_factor(factor)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor(dtype=self.compute_dtype)
def augment_image(self, image, transformation=None, **kwargs):
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
original_image = image
# Make image 4D for conv operation.
image = tf.expand_dims(image, axis=0)
# [1 1 1]
# [1 5 1]
# [1 1 1]
# all divided by 13 is the default 3x3 gaussian smoothing kernel.
# Correlating or Convolving with this filter is equivalent to performing
# a gaussian blur.
kernel = (
tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=self.compute_dtype,
shape=[3, 3, 1, 1],
)
/ 13.0
)
# Tile across channel dimension.
channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, channels, 1])
strides = [1, 1, 1, 1]
smoothed_image = tf.nn.depthwise_conv2d(
image, kernel, strides, padding="VALID", dilations=[1, 1]
)
smoothed_image = tf.clip_by_value(smoothed_image, 0.0, 255.0)
smoothed_image = tf.squeeze(smoothed_image, axis=0)
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(smoothed_image)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_smoothed_image = tf.pad(smoothed_image, [[1, 1], [1, 1], [0, 0]])
result = tf.where(
tf.equal(padded_mask, 1), padded_smoothed_image, original_image
)
# Blend the final result.
result = preprocessing.blend(original_image, result, transformation)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
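# --- Illustration only: not part of the original benchmark file. ---
# A minimal sketch of the sharpness math described in the OldRandomSharpness
# docstring: smooth the image with the 3x3 kernel [[1, 1, 1], [1, 5, 1],
# [1, 1, 1]] / 13, then linearly blend the smoothed image with the original.
# `_sharpness_blend_sketch` is a hypothetical helper; it uses "SAME" padding
# rather than the layer's "VALID"-plus-border-fill handling, so border pixels
# can differ slightly from the layer's output.
def _sharpness_blend_sketch(image, factor):
    """Blends a float `(H, W, C)` image in [0, 255] with a smoothed copy."""
    channels = tf.shape(image)[-1]
    kernel = (
        tf.constant(
            [[1, 1, 1], [1, 5, 1], [1, 1, 1]],
            dtype=image.dtype,
            shape=[3, 3, 1, 1],
        )
        / 13.0
    )
    kernel = tf.tile(kernel, [1, 1, channels, 1])
    smoothed = tf.nn.depthwise_conv2d(
        image[tf.newaxis, ...], kernel, strides=[1, 1, 1, 1], padding="SAME"
    )[0]
    # factor=0.0 returns the original image; factor=1.0 returns the smoothed one.
    return image + factor * (smoothed - image)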
class RandomSharpnessTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(2, 64, 64, 3), minval=0, maxval=255)
old_layer = OldRandomSharpness(value_range=(0, 255), factor=(0.5, 0.5))
new_layer = RandomSharpness(value_range=(0, 255), factor=(0.5, 0.5))
old_output = old_layer(images)
new_output = new_layer(images)
self.assertAllClose(old_output, new_output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomSharpness, OldRandomSharpness]
aug_args = {"value_range": (0, 255), "factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_sharpness.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_sharpness.py",
"repo_id": "keras-cv",
"token_count": 3608
} | 2 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv import bounding_box
def preprocess_voc(inputs, format, image_size):
"""mapping function to create batched image and bbox coordinates"""
inputs["image"] = tf.image.resize(inputs["image"], image_size)
inputs["objects"]["bbox"] = bounding_box.convert_format(
inputs["objects"]["bbox"],
images=inputs["image"],
source="rel_yxyx",
target=format,
)
return {
"images": inputs["image"],
"bounding_boxes": inputs["objects"]["bbox"],
}
def load_voc_dataset(
bounding_box_format,
name="voc/2007",
batch_size=9,
image_size=(224, 224),
):
dataset = tfds.load(name, split=tfds.Split.TRAIN, shuffle_files=True)
dataset = dataset.map(
lambda x: preprocess_voc(
x, format=bounding_box_format, image_size=image_size
),
num_parallel_calls=tf.data.AUTOTUNE,
)
dataset = dataset.padded_batch(
batch_size, padding_values={"images": None, "bounding_boxes": -1.0}
)
return dataset
def visualize_data(data, bounding_box_format):
data = next(iter(data))
images = data["images"]
bounding_boxes = data["bounding_boxes"]
output_images = visualize_bounding_boxes(
images, bounding_boxes, bounding_box_format
).numpy()
gallery_show(output_images)
def visualize_bounding_boxes(image, bounding_boxes, bounding_box_format):
color = np.array([[255.0, 0.0, 0.0]])
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=bounding_box_format,
target="rel_yxyx",
images=image,
)
return tf.image.draw_bounding_boxes(image, bounding_boxes, color, name=None)
def gallery_show(images):
images = images.astype(int)
for i in range(9):
image = images[i]
plt.subplot(3, 3, i + 1)
plt.imshow(image.astype("uint8"))
plt.axis("off")
plt.show()
| keras-cv/examples/layers/object_detection/demo_utils.py/0 | {
"file_path": "keras-cv/examples/layers/object_detection/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 1028
} | 3 |
"""
Title: Generate an image from a text prompt using StableDiffusion
Author: fchollet
Date created: 2022/09/24
Last modified: 2022/09/24
Description: Use StableDiffusion to generate an image according to a short text
description.
"""
from PIL import Image
from keras_cv.models import StableDiffusion
model = StableDiffusion(img_height=512, img_width=512, jit_compile=True)
img = model.text_to_image(
"Photograph of a beautiful horse running through a field"
)
Image.fromarray(img[0]).save("horse.png")
print("Saved at horse.png")
| keras-cv/examples/models/generative/stable_diffusion/text_to_image.py/0 | {
"file_path": "keras-cv/examples/models/generative/stable_diffusion/text_to_image.py",
"repo_id": "keras-cv",
"token_count": 182
} | 4 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Keras backend module.
This module adds a temporary Keras API surface that is fully under KerasCV
control. The goal is to allow us to write Keras 3-like code everywhere, while
still supporting Keras 2. We do this by using the `keras_core` package with
Keras 2 to backport Keras 3 numerics APIs (`keras.ops` and `keras.random`) into
Keras 2. The sub-modules exposed are as follows:
- `config`: check which version of Keras is being run.
- `keras`: The full `keras` API with compat shims for older Keras versions.
- `ops`: `keras.ops` for Keras 3 or `keras_core.ops` for Keras 2.
- `random`: `keras.random` for Keras 3 or `keras_core.random` for Keras 2.
"""
from keras_cv.backend import config # noqa: E402
from keras_cv.backend import keras # noqa: E402
from keras_cv.backend import ops # noqa: E402
from keras_cv.backend import random # noqa: E402
from keras_cv.backend import tf_ops # noqa: E402
def assert_tf_keras(src):
if config.keras_3():
raise NotImplementedError(
f"KerasCV component {src} does not yet support Keras 3, and can "
"only be used in `tf.keras`."
)
def supports_ragged():
return not config.keras_3()
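# --- Illustration only: a minimal sketch of how the shims above are consumed.
# Code written against `ops` runs unchanged under Keras 2 (via keras_core) and
# Keras 3; the helper name below is hypothetical and not part of the API.
def _backend_ops_sketch():
    # `ops.sum` resolves to `keras.ops.sum` on Keras 3 and `keras_core.ops.sum`
    # on Keras 2, so this reduction is backend-agnostic.
    x = ops.ones((2, 3))
    return ops.sum(x, axis=-1)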
| keras-cv/keras_cv/backend/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/backend/__init__.py",
"repo_id": "keras-cv",
"token_count": 587
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.bounding_box.to_ragged import to_ragged
from keras_cv.bounding_box.validate_format import validate_format
@keras_cv_export("keras_cv.bounding_box.mask_invalid_detections")
def mask_invalid_detections(bounding_boxes, output_ragged=False):
"""masks out invalid detections with -1s.
This utility is mainly used on the output of non-max suppression operations.
The output of non-max-suppression contains all the detections, even invalid
ones. Users are expected to use `num_detections` to determine how many boxes
are in each image.
In contrast, KerasCV expects all bounding boxes to be padded with -1s.
This function uses the value of `num_detections` to mask out
invalid boxes with -1s.
Args:
bounding_boxes: a dictionary complying with KerasCV bounding box format.
In addition to the normal required keys, these boxes are also
expected to have a `num_detections` key.
output_ragged: whether to output RaggedTensor based bounding
boxes.
Returns:
bounding boxes with proper masking of the boxes according to
`num_detections`. This allows proper interop with non-max suppression.
Returned boxes match the specification fed to the function, so if the
bounding box tensor uses `tf.RaggedTensor` to represent boxes the
returned value will also return `tf.RaggedTensor` representations.
"""
# ensure we are complying with KerasCV bounding box format.
info = validate_format(bounding_boxes)
if info["ragged"]:
raise ValueError(
"`bounding_box.mask_invalid_detections()` requires inputs to be "
"Dense tensors. Please call "
"`bounding_box.to_dense(bounding_boxes)` before passing your boxes "
"to `bounding_box.mask_invalid_detections()`."
)
if "num_detections" not in bounding_boxes:
raise ValueError(
"`bounding_boxes` must have key 'num_detections' "
"to be used with `bounding_box.mask_invalid_detections()`."
)
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
confidence = bounding_boxes.get("confidence", None)
num_detections = bounding_boxes.get("num_detections")
# Create a mask to select only the first N boxes from each batch
mask = ops.cast(
ops.expand_dims(ops.arange(boxes.shape[1]), axis=0),
num_detections.dtype,
)
mask = mask < num_detections[:, None]
classes = ops.where(mask, classes, -ops.ones_like(classes))
if confidence is not None:
confidence = ops.where(mask, confidence, -ops.ones_like(confidence))
# reuse mask for boxes
mask = ops.expand_dims(mask, axis=-1)
mask = ops.repeat(mask, repeats=boxes.shape[-1], axis=-1)
boxes = ops.where(mask, boxes, -ops.ones_like(boxes))
result = bounding_boxes.copy()
result["boxes"] = boxes
result["classes"] = classes
if confidence is not None:
result["confidence"] = confidence
if output_ragged and backend.supports_ragged():
return to_ragged(result)
return result
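# --- Illustration only: a hypothetical helper, not part of the public API. ---
# A tiny example of the masking described above: with `num_detections=[1]`,
# every box after the first in each batch element is overwritten with -1s.
def _mask_invalid_detections_sketch():
    import numpy as np

    bounding_boxes = {
        "boxes": np.array(
            [[[10.0, 10.0, 20.0, 20.0], [5.0, 5.0, 6.0, 6.0]]], dtype="float32"
        ),
        "classes": np.array([[0.0, 1.0]], dtype="float32"),
        "num_detections": np.array([1]),
    }
    masked = mask_invalid_detections(bounding_boxes)
    # masked["classes"] -> [[0., -1.]]
    # masked["boxes"][0, 1] -> [-1., -1., -1., -1.]
    return masked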
| keras-cv/keras_cv/bounding_box/mask_invalid_detections.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/mask_invalid_detections.py",
"repo_id": "keras-cv",
"token_count": 1362
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import keras
from keras_cv.callbacks import WaymoEvaluationCallback
from keras_cv.tests.test_case import TestCase
NUM_RECORDS = 10
POINT_FEATURES = 3
NUM_POINTS = 20
NUM_BOXES = 2
BOX_FEATURES = 7
METRIC_KEYS = [
"average_precision_vehicle_l1",
"average_precision_vehicle_l2",
"average_precision_ped_l1",
"average_precision_ped_l2",
]
class WaymoEvaluationCallbackTest(TestCase):
@pytest.mark.skipif(True, reason="Requires Waymo Open Dataset")
def test_model_fit(self):
# Silly hypothetical model
model = self.build_model()
points = tf.random.normal((NUM_RECORDS, POINT_FEATURES, NUM_POINTS))
# Some random boxes, and some -1 boxes (to mimic padding ragged boxes)
boxes = tf.concat(
[
tf.random.uniform((NUM_RECORDS // 2, NUM_BOXES, BOX_FEATURES)),
tf.cast(
tf.fill((NUM_RECORDS // 2, NUM_BOXES, BOX_FEATURES), -1),
tf.float32,
),
],
axis=0,
)
dataset = tf.data.Dataset.from_tensor_slices(
(
points,
{
"3d_boxes": {
"boxes": boxes,
"classes": np.ones((NUM_RECORDS, NUM_BOXES)),
"difficulty": np.ones((NUM_RECORDS, NUM_BOXES)),
"mask": tf.concat(
[
np.ones((NUM_RECORDS // 2, NUM_BOXES)),
np.zeros((NUM_RECORDS // 2, NUM_BOXES)),
],
axis=0,
),
}
},
)
).batch(5)
callback = WaymoEvaluationCallback(validation_data=dataset)
history = model.fit(points, boxes, callbacks=[callback])
self.assertAllInSet(METRIC_KEYS, history.history.keys())
def build_model(self):
inputs = keras.Input(shape=(POINT_FEATURES, NUM_POINTS))
x = keras.layers.Flatten()(inputs)
# Add extra features for class and confidence
x = keras.layers.Dense(NUM_BOXES * (BOX_FEATURES + 2))(x)
x = keras.layers.Reshape((NUM_BOXES, BOX_FEATURES + 2))(x)
x = keras.layers.Lambda(
lambda x: {
"3d_boxes": {
"boxes": x[:, :, :7],
"classes": tf.abs(x[:, :, 7]),
"confidence": x[:, :, 8],
}
}
)(x)
class MeanLoss(keras.losses.Loss):
def call(self, y_true, y_pred):
return tf.reduce_mean(y_pred, axis=-1)
model = keras.Model(inputs=inputs, outputs=x)
model.compile(loss=MeanLoss())
return model
| keras-cv/keras_cv/callbacks/waymo_evaluation_callback_test.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/waymo_evaluation_callback_test.py",
"repo_id": "keras-cv",
"token_count": 1757
} | 7 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define EIGEN_USE_THREADS
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace kerascv {
class WithinBoxOp : public OpKernel {
public:
explicit WithinBoxOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& points = ctx->input(0);
const Tensor& boxes = ctx->input(1);
const int num_points = points.dim_size(0);
const int num_boxes = boxes.dim_size(0);
Tensor* box_indices = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("box_indices", TensorShape({num_points}),
&box_indices));
auto boxes_indices_t = box_indices->flat<int>();
for (auto i = 0; i < num_points; ++i) boxes_indices_t(i) = -1;
std::vector<box::Upright3DBox> boxes_vec = box::ParseBoxesFromTensor(boxes);
std::vector<box::Vertex> points_vec = box::ParseVerticesFromTensor(points);
std::vector<int> p_indices_x(num_points);
// index x range [0, num_points)
std::iota(p_indices_x.begin(), p_indices_x.end(), 0);
// index y range [0, num_points)
std::vector<int> p_indices_y(p_indices_x);
// sort, return sorted value and indices
std::sort(p_indices_x.begin(), p_indices_x.end(),
[&points_vec](const int& a, const int& b) -> bool {
return points_vec[a].x < points_vec[b].x;
});
std::sort(p_indices_y.begin(), p_indices_y.end(),
[&points_vec](const int& a, const int& b) -> bool {
return points_vec[a].y < points_vec[b].y;
});
std::vector<double> sorted_points_x;
sorted_points_x.reserve(num_points);
std::vector<double> sorted_points_y;
sorted_points_y.reserve(num_points);
for (int i = 0; i < num_points; ++i) {
sorted_points_x.emplace_back(points_vec[p_indices_x[i]].x);
sorted_points_y.emplace_back(points_vec[p_indices_y[i]].y);
}
// for each box, find all point indices whose x values are within box
// boundaries when the box is rotated, the box boundary is the minimum and
// maximum x for all vertices
std::vector<int> points_x_min =
box::GetMinXIndexFromBoxes(boxes_vec, sorted_points_x);
std::vector<int> points_x_max =
box::GetMaxXIndexFromBoxes(boxes_vec, sorted_points_x);
std::vector<std::unordered_set<int>> points_x_indices(num_boxes);
auto set_fn_x = [&points_x_min, &points_x_max, &p_indices_x,
&points_x_indices](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int> p_set;
int p_start = points_x_min[idx];
int p_end = points_x_max[idx];
for (auto p_idx = p_start; p_idx <= p_end; ++p_idx) {
p_set.insert(p_indices_x[p_idx]);
}
points_x_indices[idx] = p_set;
}
};
const CPUDevice& device = ctx->eigen_device<CPUDevice>();
const Eigen::TensorOpCost cost(num_points, num_boxes, 3);
device.parallelFor(num_boxes, cost, set_fn_x);
// for each box, find all point indices whose y values are within box
// boundaries when the box is rotated, the box boundary is the minimum and
// maximum y for all vertices
std::vector<int> points_y_min =
box::GetMinYIndexFromBoxes(boxes_vec, sorted_points_y);
std::vector<int> points_y_max =
box::GetMaxYIndexFromBoxes(boxes_vec, sorted_points_y);
std::vector<std::unordered_set<int>> points_y_indices(num_boxes);
auto set_fn_y = [&points_y_min, &points_y_max, &p_indices_y,
&points_y_indices](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int> p_set;
int p_start = points_y_min[idx];
int p_end = points_y_max[idx];
for (auto p_idx = p_start; p_idx <= p_end; ++p_idx) {
p_set.insert(p_indices_y[p_idx]);
}
points_y_indices[idx] = p_set;
}
};
device.parallelFor(num_boxes, cost, set_fn_y);
// for the intersection of x indices set and y indices set, check if
// those points are within the box
auto within_fn = [&points_x_indices, &points_y_indices, &boxes_vec,
&points_vec,
&boxes_indices_t](int64_t begin, int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
std::unordered_set<int>& set_a = points_x_indices[idx];
std::unordered_set<int>& set_b = points_y_indices[idx];
std::unordered_set<int> p_set;
for (auto val : set_a) {
if (set_b.find(val) != set_b.end()) {
p_set.insert(val);
}
}
box::Upright3DBox& box = boxes_vec[idx];
for (auto p_idx : p_set) {
box::Vertex& point = points_vec[p_idx];
if (box.WithinBox3D(point)) {
boxes_indices_t(p_idx) = idx;
}
}
}
};
device.parallelFor(num_boxes, cost, within_fn);
}
};
REGISTER_KERNEL_BUILDER(Name("KcvWithinBox").Device(DEVICE_CPU), WithinBoxOp);
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/kernels/withinbox_op.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/kernels/withinbox_op.cc",
"repo_id": "keras-cv",
"token_count": 2641
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for the Waymo Open Dataset."""
import os
import tensorflow as tf
from keras_cv.datasets.waymo import transformer
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
import waymo_open_dataset
except ImportError:
waymo_open_dataset = None
from keras_cv.api_export import keras_cv_export
def _generate_frames(segments, transformer):
def _generator():
for record in segments:
frame = waymo_open_dataset.dataset_pb2.Frame()
frame.ParseFromString(record.numpy())
yield transformer(frame)
return _generator
@keras_cv_export(
"keras_cv.datasets.waymo.load", package="keras_cv.datasets.waymo"
)
def load(
tfrecord_path,
transformer=transformer.build_tensors_from_wod_frame,
output_signature=transformer.WOD_FRAME_OUTPUT_SIGNATURE,
):
"""
Loads the Waymo Open Dataset and transforms frames into features as
tensors.
References:
- [Waymo Dataset Research Paper](https://arxiv.org/abs/1912.04838)
- [Waymo Dataset Website](https://waymo.com/open/)
Args:
tfrecord_path: a string pointing to the directory containing the raw
tfrecords in the Waymo Open Dataset, or a list of strings pointing
to the tfrecords themselves
transformer: a Python function which transforms a Waymo Open Dataset
Frame object into tensors, defaults to convert range image to point
cloud.
output_signature: the type specification of the tensors created by the
transformer. This is often a dictionary from feature column names to
tf.TypeSpecs, defaults to point cloud representations of Waymo Open
Dataset data.
Returns:
tf.data.Dataset containing the features extracted from Frames using the
provided transformer.
Example:
```python
from keras_cv.datasets.waymo import load
def simple_transformer(frame):
return {"timestamp_micros": frame.timestamp_micros}
output_signature = {"timestamp_micros": tf.TensorSpec((), tf.int64)}
load("/path/to/tfrecords", simple_transformer, output_signature)
```
"""
assert_waymo_open_dataset_installed("keras_cv.datasets.waymo.load()")
if isinstance(tfrecord_path, list):
filenames = tfrecord_path
else:
filenames = tf.data.TFRecordDataset.list_files(
os.path.join(tfrecord_path, "*.tfrecord")
)
segments = tf.data.TFRecordDataset(filenames)
return tf.data.Dataset.from_generator(
_generate_frames(segments, transformer),
output_signature=output_signature,
)
| keras-cv/keras_cv/datasets/waymo/load.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/waymo/load.py",
"repo_id": "keras-cv",
"token_count": 1190
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow import keras
from keras_cv.layers import FeaturePyramid
from keras_cv.tests.test_case import TestCase
class FeaturePyramidTest(TestCase):
def test_return_type_dict(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
self.assertTrue(isinstance(output, dict))
self.assertEquals(sorted(output.keys()), [2, 3, 4, 5])
def test_result_shapes(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
# Test with different resolution and channel size
c2 = np.ones([2, 64, 128, 4])
c3 = np.ones([2, 32, 64, 8])
c4 = np.ones([2, 16, 32, 16])
c5 = np.ones([2, 8, 16, 32])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
layer = FeaturePyramid(min_level=2, max_level=5)
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
def test_with_keras_input_tensor(self):
# This mimic the model building with Backbone network
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = keras.layers.Input([64, 64, 3])
c3 = keras.layers.Input([32, 32, 3])
c4 = keras.layers.Input([16, 16, 3])
c5 = keras.layers.Input([8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
output = layer(inputs)
for level in inputs.keys():
self.assertEquals(output[level].shape[1], inputs[level].shape[1])
self.assertEquals(output[level].shape[2], inputs[level].shape[2])
self.assertEquals(output[level].shape[3], layer.num_channels)
def test_invalid_lateral_layers(self):
lateral_layers = [keras.layers.Conv2D(256, 1)] * 3
with self.assertRaisesRegexp(
ValueError, "Expect lateral_layers to be a dict"
):
_ = FeaturePyramid(
min_level=2, max_level=5, lateral_layers=lateral_layers
)
lateral_layers = {
2: keras.layers.Conv2D(256, 1),
3: keras.layers.Conv2D(256, 1),
4: keras.layers.Conv2D(256, 1),
}
with self.assertRaisesRegexp(
ValueError, "with keys as .* [2, 3, 4, 5]"
):
_ = FeaturePyramid(
min_level=2, max_level=5, lateral_layers=lateral_layers
)
def test_invalid_output_layers(self):
output_layers = [keras.layers.Conv2D(256, 3)] * 3
with self.assertRaisesRegexp(
ValueError, "Expect output_layers to be a dict"
):
_ = FeaturePyramid(
min_level=2, max_level=5, output_layers=output_layers
)
output_layers = {
2: keras.layers.Conv2D(256, 3),
3: keras.layers.Conv2D(256, 3),
4: keras.layers.Conv2D(256, 3),
}
with self.assertRaisesRegexp(
ValueError, "with keys as .* [2, 3, 4, 5]"
):
_ = FeaturePyramid(
min_level=2, max_level=5, output_layers=output_layers
)
def test_invalid_input_features(self):
layer = FeaturePyramid(min_level=2, max_level=5)
c2 = np.ones([2, 64, 64, 3])
c3 = np.ones([2, 32, 32, 3])
c4 = np.ones([2, 16, 16, 3])
c5 = np.ones([2, 8, 8, 3])
inputs = {2: c2, 3: c3, 4: c4, 5: c5}
# Build required for Keras 3
_ = layer(inputs)
list_input = [c2, c3, c4, c5]
with self.assertRaisesRegexp(
ValueError, "expects input features to be a dict"
):
layer(list_input)
dict_input_with_missing_feature = {2: c2, 3: c3, 4: c4}
with self.assertRaisesRegexp(
ValueError, "Expect feature keys.*[2, 3, 4, 5]"
):
layer(dict_input_with_missing_feature)
| keras-cv/keras_cv/layers/feature_pyramid_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/feature_pyramid_test.py",
"repo_id": "keras-cv",
"token_count": 2569
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Union
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import assert_tf_keras
@keras_cv_export("keras_cv.layers.ROIGenerator")
class ROIGenerator(keras.layers.Layer):
"""
Generates region of interests (ROI, or proposal) from scores.
Mainly used in Region CNN (RCNN) networks.
This works for a multi-level input, both boxes and scores are dictionary
inputs with the same set of keys.
Users can configure top k and threshold differently in train and inference.
Users can choose to combine all levels if NMS across all levels is desired.
The following steps are applied to pair of (boxes, scores):
1) pre_nms_topk scores and boxes sorted and selected per level
2) nms applied and selected post_nms_topk scores and ROIs per level
3) combined scores and ROIs across all levels
4) post_nms_topk scores and ROIs sorted and selected
Args:
bounding_box_format: a case-insensitive string.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
pre_nms_topk_train: int. number of top k scoring proposals to keep
before applying NMS in training mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_train: float. score threshold to use for NMS in
training mode.
nms_iou_threshold_train: float. IOU threshold to use for NMS in training
mode.
post_nms_topk_train: int. number of top k scoring proposals to keep
after applying NMS in training mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
pre_nms_topk_test: int. number of top k scoring proposals to keep before
applying NMS in inference mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_test: float. score threshold to use for NMS in
inference mode.
nms_iou_threshold_test: float. IOU threshold to use for NMS in inference
mode.
post_nms_topk_test: int. number of top k scoring proposals to keep after
applying NMS in inference mode. When RPN is run on multiple
feature maps / levels (as in FPN) this number is per
feature map / level.
Usage:
```python
roi_generator = ROIGenerator("xyxy")
boxes = {2: tf.random.normal([32, 5, 4])}
scores = {2: tf.random.normal([32, 5])}
rois, roi_scores = roi_generator(boxes, scores, training=True)
```
""" # noqa: E501
def __init__(
self,
bounding_box_format,
pre_nms_topk_train: int = 2000,
nms_score_threshold_train: float = 0.0,
nms_iou_threshold_train: float = 0.7,
post_nms_topk_train: int = 1000,
pre_nms_topk_test: int = 1000,
nms_score_threshold_test: float = 0.0,
nms_iou_threshold_test: float = 0.7,
post_nms_topk_test: int = 1000,
**kwargs,
):
assert_tf_keras("keras_cv.layers.ROIGenerator")
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.pre_nms_topk_train = pre_nms_topk_train
self.nms_score_threshold_train = nms_score_threshold_train
self.nms_iou_threshold_train = nms_iou_threshold_train
self.post_nms_topk_train = post_nms_topk_train
self.pre_nms_topk_test = pre_nms_topk_test
self.nms_score_threshold_test = nms_score_threshold_test
self.nms_iou_threshold_test = nms_iou_threshold_test
self.post_nms_topk_test = post_nms_topk_test
self.built = True
def call(
self,
multi_level_boxes: Union[tf.Tensor, Mapping[int, tf.Tensor]],
multi_level_scores: Union[tf.Tensor, Mapping[int, tf.Tensor]],
training: Optional[bool] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""
Args:
multi_level_boxes: float Tensor. A dictionary or single Tensor of
boxes, one per level. Shape is [batch_size, num_boxes, 4] each
level, in `bounding_box_format`. The boxes from RPNs are usually
encoded as deltas w.r.t. anchors; they need to be decoded before
passing in here.
multi_level_scores: float Tensor. A dictionary or single Tensor of
scores, typically confidence scores, one per level. Shape is
[batch_size, num_boxes] each level.
Returns:
rois: float Tensor of [batch_size, post_nms_topk, 4]
roi_scores: float Tensor of [batch_size, post_nms_topk]
"""
if training:
pre_nms_topk = self.pre_nms_topk_train
post_nms_topk = self.post_nms_topk_train
nms_score_threshold = self.nms_score_threshold_train
nms_iou_threshold = self.nms_iou_threshold_train
else:
pre_nms_topk = self.pre_nms_topk_test
post_nms_topk = self.post_nms_topk_test
nms_score_threshold = self.nms_score_threshold_test
nms_iou_threshold = self.nms_iou_threshold_test
def per_level_gen(boxes, scores):
scores_shape = scores.get_shape().as_list()
# scores can also be [batch_size, num_boxes, 1]
if len(scores_shape) == 3:
scores = tf.squeeze(scores, axis=-1)
_, num_boxes = scores.get_shape().as_list()
level_pre_nms_topk = min(num_boxes, pre_nms_topk)
level_post_nms_topk = min(num_boxes, post_nms_topk)
scores, sorted_indices = tf.nn.top_k(
scores, k=level_pre_nms_topk, sorted=True
)
boxes = tf.gather(boxes, sorted_indices, batch_dims=1)
# convert from input format to yxyx for the TF NMS operation
boxes = bounding_box.convert_format(
boxes,
source=self.bounding_box_format,
target="yxyx",
)
# TODO(tanzhenyu): consider supporting soft / batched nms for accl
selected_indices, num_valid = tf.image.non_max_suppression_padded(
boxes,
scores,
max_output_size=level_post_nms_topk,
iou_threshold=nms_iou_threshold,
score_threshold=nms_score_threshold,
pad_to_max_output_size=True,
sorted_input=True,
canonicalized_coordinates=True,
)
# convert back to input format
boxes = bounding_box.convert_format(
boxes,
source="yxyx",
target=self.bounding_box_format,
)
level_rois = tf.gather(boxes, selected_indices, batch_dims=1)
level_roi_scores = tf.gather(scores, selected_indices, batch_dims=1)
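# Zero out entries beyond `num_valid`: the padded NMS output keeps
# `level_post_nms_topk` slots per image, so trailing invalid boxes and
# scores are masked to zero before being returned.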
level_rois = level_rois * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1, 1])
< tf.reshape(num_valid, [-1, 1, 1]),
level_rois.dtype,
)
level_roi_scores = level_roi_scores * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1])
< tf.reshape(num_valid, [-1, 1]),
level_roi_scores.dtype,
)
return level_rois, level_roi_scores
if not isinstance(multi_level_boxes, dict):
return per_level_gen(multi_level_boxes, multi_level_scores)
rois = []
roi_scores = []
for level in sorted(multi_level_scores.keys()):
boxes = multi_level_boxes[level]
scores = multi_level_scores[level]
level_rois, level_roi_scores = per_level_gen(boxes, scores)
rois.append(level_rois)
roi_scores.append(level_roi_scores)
rois = tf.concat(rois, axis=1)
roi_scores = tf.concat(roi_scores, axis=1)
_, num_valid_rois = roi_scores.get_shape().as_list()
overall_top_k = min(num_valid_rois, post_nms_topk)
roi_scores, sorted_indices = tf.nn.top_k(
roi_scores, k=overall_top_k, sorted=True
)
rois = tf.gather(rois, sorted_indices, batch_dims=1)
return rois, roi_scores
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"pre_nms_topk_train": self.pre_nms_topk_train,
"nms_score_threshold_train": self.nms_score_threshold_train,
"nms_iou_threshold_train": self.nms_iou_threshold_train,
"post_nms_topk_train": self.post_nms_topk_train,
"pre_nms_topk_test": self.pre_nms_topk_test,
"nms_score_threshold_test": self.nms_score_threshold_test,
"nms_iou_threshold_test": self.nms_iou_threshold_test,
"post_nms_topk_test": self.post_nms_topk_test,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/object_detection/roi_generator.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_generator.py",
"repo_id": "keras-cv",
"token_count": 4625
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection_3d import voxel_utils
EPSILON = 1e-4
VOXEL_FEATURE_MIN = -1000
def compute_point_voxel_id(point_voxel_xyz, voxel_spatial_size):
"""Computes point voxel IDs.
Args:
point_voxel_xyz: [B, N, dim] voxel coordinates for each point
voxel_spatial_size: voxel spatial size
Returns:
point_voxel_id: [B, N] unique ID of each voxel.
"""
batch_size, _, dim = list(point_voxel_xyz.shape)
if batch_size is None:
batch_size = ops.shape(point_voxel_xyz)[0]
assert dim == len(voxel_spatial_size), f"{point_voxel_xyz.shape}"
voxel_spatial_size_prod = [
np.prod(voxel_spatial_size[i:]).item() for i in range(dim)
]
voxel_spatial_size_prod_shift = voxel_spatial_size_prod[1:] + [1]
point_voxel_xyz_multiplied = point_voxel_xyz * ops.array(
voxel_spatial_size_prod_shift, dtype=point_voxel_xyz.dtype
)
# [B, N]
point_voxel_id = ops.sum(point_voxel_xyz_multiplied, axis=-1)
if batch_size == 1:
return point_voxel_id
batch_multiplier = (
ops.arange(batch_size, dtype="int32") * voxel_spatial_size_prod[0]
)
batch_multiplier = ops.cast(
ops.expand_dims(batch_multiplier, axis=-1), point_voxel_id.dtype
)
return point_voxel_id + batch_multiplier
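# Worked example (illustration only) of the ID computation above, assuming a
# voxel grid of spatial size [4, 4, 2]:
#   voxel_spatial_size_prod       = [32, 8, 2]
#   voxel_spatial_size_prod_shift = [8, 2, 1]
# so a point in voxel (x=1, y=2, z=1) gets id 1*8 + 2*2 + 1*1 = 13, and the
# same voxel in batch element b is offset by b * 32, keeping IDs unique
# across the batch.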
class PointToVoxel(keras.layers.Layer):
"""Voxelization layer."""
def __init__(self, voxel_size, spatial_size, **kwargs):
"""Voxelization layer constructor.
Args:
voxel_size: voxel size in each xyz dimension.
spatial_size: max/min range in each dim in global coordinate frame.
name: layer name
**kwargs: additional key value args (e.g. dtype) passed to the parent
class.
"""
super().__init__(**kwargs)
dim = len(voxel_size)
assert len(spatial_size) == 2 * dim, f"{spatial_size}"
self._voxel_size = voxel_size
self._spatial_size = spatial_size
self._voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, self._voxel_size
)
# TODO(tanzhenyu): consider using keras masking.
def call(self, point_xyz, point_mask):
"""Dynamically voxelizes points.
B: batch_size.
N: number of points.
dim: the input dimension.
Args:
point_xyz: [B, N, dim] point xyz in global coordinate relative to sdc.
point_mask: [B, N] valid point mask.
Returns:
point_voxel_feature: [B, N, dim] voxel feature (delta_{x,y,z}).
point_voxel_id: [B, N] voxel ID of each point. Invalid voxels have
Id's set to 0.
point_voxel_mask: [B, N] valid point voxel boolean mask.
"""
# [B, N, dim]
# convert from point coordinate to voxel index
point_voxel_xyz_float = ops.floor(
point_xyz / ops.array(self._voxel_size, point_xyz.dtype) + 0.5
)
# [B, N, dim]
# delta to the nearest voxel
point_voxel_feature = ops.cast(
point_xyz
- (
point_voxel_xyz_float
* ops.array(self._voxel_size, dtype=point_voxel_xyz_float.dtype)
),
point_xyz.dtype,
)
# [B, N, dim]
point_voxel_xyz_int = ops.cast(point_voxel_xyz_float, "int32")
# [dim]
# get xmin, ymin, zmin
voxel_origin = voxel_utils.compute_voxel_origin(
self._spatial_size, self._voxel_size
)
# [B, N, dim]
# convert point voxel to positive voxel index
point_voxel_xyz = point_voxel_xyz_int - ops.cast(
ops.expand_dims(ops.expand_dims(voxel_origin, axis=0), axis=0),
point_voxel_xyz_int.dtype,
)
# [B, N]
# remove points outside the voxel boundary
point_voxel_mask = ops.logical_and(
point_voxel_xyz >= 0,
point_voxel_xyz
< ops.array(self._voxel_spatial_size, dtype=point_voxel_xyz.dtype),
)
point_voxel_mask = ops.all(point_voxel_mask, axis=-1)
point_voxel_mask = ops.logical_and(point_voxel_mask, point_mask)
# [B, N]
point_voxel_mask_int = ops.cast(point_voxel_mask, dtype="int32")
# [B, N] for voxel_id, int constant for num_voxels, in the range of
# [0, B * num_voxels]
point_voxel_id = ops.cast(
compute_point_voxel_id(point_voxel_xyz, self._voxel_spatial_size),
point_voxel_mask_int.dtype,
)
# [B, N]
point_voxel_id = point_voxel_id * point_voxel_mask_int
return point_voxel_feature, point_voxel_id, point_voxel_mask
@keras_cv_export("keras_cv.layers.DynamicVoxelization")
class DynamicVoxelization(keras.layers.Layer):
"""Dynamic voxelization and pool layer.
This layer assigns and pools points into voxels,
then it concatenates with point features and feed into a neural network,
and max pools all point features inside each voxel.
Args:
voxel_size: the x, y, z dimension of each voxel.
spatial_size: the x, y, z boundary of voxels
Returns:
voxelized feature, a float Tensor.
"""
def __init__(self, voxel_size, spatial_size, **kwargs):
super().__init__(**kwargs)
self._voxelization_layer = PointToVoxel(
voxel_size=voxel_size, spatial_size=spatial_size
)
self._voxel_size = voxel_size
self._spatial_size = spatial_size
self._voxel_spatial_size = voxel_utils.compute_voxel_spatial_size(
spatial_size, self._voxel_size
)
self._voxel_spatial_size_volume = np.prod(
self._voxel_spatial_size
).item()
self.point_net_dense = keras.layers.Dense(128)
self.point_net_norm = keras.layers.BatchNormalization()
self.point_net_activation = keras.layers.ReLU()
self.built = True
def call(self, point_xyz, point_feature, point_mask, training=True):
"""Voxelizes and learns voxel features with a point net.
B: batch_size.
N: number of points.
dim: the input dimension.
Args:
point_xyz: [B, N, 3] point xyz in global coordinate.
point_feature: [B, N, dim] point feature inputs.
point_mask: [B, N] valid point mask.
training: whether it is in training mode.
Returns:
voxel_feature: [B, x_max, y_max, {z_max,}, mlp_dimension] voxel
features. If z_max is 1, z-dim is squeezed.
"""
(
point_voxel_feature,
point_voxel_id,
point_voxel_mask,
) = self._voxelization_layer(point_xyz=point_xyz, point_mask=point_mask)
# TODO(tanzhenyu): move compute_point_voxel_id to here, so PointToVoxel
# layer is more generic.
point_feature = ops.concatenate(
[point_feature, point_voxel_feature], axis=-1
)
batch_size = list(point_feature.shape)[0] or ops.shape(point_feature)[0]
# [B, N, 1]
point_mask_float = ops.expand_dims(
ops.cast(point_voxel_mask, point_feature.dtype), axis=-1
)
# [B, N, dim]
point_feature = point_feature * point_mask_float
point_feature = self.point_net_dense(point_feature)
point_feature = self.point_net_norm(
point_feature, training=training, mask=point_mask
)
point_feature = self.point_net_activation(point_feature)
# [B, N, new_dim]
point_feature = point_feature * point_mask_float
new_dim = list(point_feature.shape)[-1]
point_feature = ops.reshape(point_feature, [-1, new_dim])
point_voxel_id = ops.cast(ops.reshape(point_voxel_id, [-1]), "int32")
# [B * num_voxels, new_dim]
voxel_feature = ops.segment_max(
point_feature,
point_voxel_id,
batch_size * self._voxel_spatial_size_volume,
)
# unsorted_segment_max sets empty values to -inf(float).
voxel_feature_valid_mask = voxel_feature > VOXEL_FEATURE_MIN
voxel_feature = voxel_feature * ops.cast(
voxel_feature_valid_mask, dtype=voxel_feature.dtype
)
out_shape = [batch_size] + self._voxel_spatial_size + [new_dim]
if out_shape[-2] == 1:
out_shape = out_shape[:-2] + [out_shape[-1]]
voxel_feature = ops.reshape(voxel_feature, out_shape)
return voxel_feature
def compute_output_shape(self, input_shape):
return tuple([input_shape[0]] + self._voxel_spatial_size[:-1] + [128])
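# Minimal usage sketch (illustration only; the sizes below are hypothetical):
#
#   layer = DynamicVoxelization(
#       voxel_size=[0.5, 0.5, 6.0],
#       spatial_size=[-10.0, 10.0, -10.0, 10.0, -3.0, 3.0],
#   )
#   point_xyz = ops.zeros((1, 100, 3))
#   point_feature = ops.zeros((1, 100, 4))
#   point_mask = ops.ones((1, 100), dtype="bool")
#   voxel_feature = layer(point_xyz, point_feature, point_mask)
#   # voxel_feature.shape -> (1, 40, 40, 128); the z axis is squeezed because
#   # the 6 m z-range fits into a single voxel (z_max == 1).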
| keras-cv/keras_cv/layers/object_detection_3d/voxelization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxelization.py",
"repo_id": "keras-cv",
"token_count": 4461
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.tests.test_case import TestCase
class EqualizationTest(TestCase):
def test_return_shapes(self):
xs = 255 * np.ones((2, 512, 512, 3), dtype=np.int32)
layer = Equalization(value_range=(0, 255))
xs = layer(xs)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertAllEqual(xs, 255 * np.ones((2, 512, 512, 3)))
@pytest.mark.tf_keras_only
def test_return_shapes_inside_model(self):
layer = Equalization(value_range=(0, 255))
inp = keras.layers.Input(shape=[512, 512, 5])
out = layer(inp)
model = keras.models.Model(inp, out)
self.assertEqual(model.output_shape, (None, 512, 512, 5))
def test_equalizes_to_all_bins(self):
xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
np.float32
)
layer = Equalization(value_range=(0, 255))
xs = layer(xs)
for i in range(0, 256):
self.assertTrue(np.any(ops.convert_to_numpy(xs) == i))
@parameterized.named_parameters(
("float32", np.float32), ("int32", np.int32), ("int64", np.int64)
)
def test_input_dtypes(self, dtype):
xs = np.random.uniform(size=(2, 512, 512, 3), low=0, high=255).astype(
dtype
)
layer = Equalization(value_range=(0, 255))
xs = ops.convert_to_numpy(layer(xs))
for i in range(0, 256):
self.assertTrue(np.any(xs == i))
self.assertAllInRange(xs, 0, 255)
@parameterized.named_parameters(("0_255", 0, 255), ("0_1", 0, 1))
def test_output_range(self, lower, upper):
xs = np.random.uniform(
size=(2, 512, 512, 3), low=lower, high=upper
).astype(np.float32)
layer = Equalization(value_range=(lower, upper))
xs = ops.convert_to_numpy(layer(xs))
self.assertAllInRange(xs, lower, upper)
| keras-cv/keras_cv/layers/preprocessing/equalization_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/equalization_test.py",
"repo_id": "keras-cv",
"token_count": 1121
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import core
from keras_cv.api_export import keras_cv_export
from keras_cv.layers import preprocessing as cv_preprocessing
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandAugment")
class RandAugment(RandomAugmentationPipeline):
"""RandAugment performs the Rand Augment operation on input images.
This layer can be thought of as an all-in-one image augmentation layer. The
policy implemented by this layer has been benchmarked extensively and is
effective on a wide variety of datasets.
The policy operates as follows:
For each augmentation in the range `[0, augmentations_per_image]`,
the policy selects a random operation from a list of operations.
It then samples a random number and if that number is less than
`rate` applies it to the given image.
References:
- [RandAugment](https://arxiv.org/abs/1909.13719)
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
augmentations_per_image: the number of layers to use in the rand augment
policy, defaults to `3`.
magnitude: magnitude is the mean of the normal distribution used to
sample the magnitude used for each data augmentation. Magnitude
should be a float in the range `[0, 1]`. A magnitude of `0`
indicates that the augmentations are as weak as possible (not
recommended), while a value of `1.0` implies use of the strongest
possible augmentation. All magnitudes are clipped to the range
`[0, 1]` after sampling. Defaults to `0.5`.
magnitude_stddev: the standard deviation to use when drawing values for
the perturbations. Keep in mind magnitude will still be clipped to
the range `[0, 1]` after samples are drawn from the normal
distribution. Defaults to `0.15`.
rate: the rate at which to apply each augmentation. This parameter is
applied on a per-distortion layer, per image. Should be in the range
`[0, 1]`. To reproduce the original RandAugment paper results, set
this to `10/11`. The original `RandAugment` paper includes an
Identity transform. By setting the rate to 10/11 in our
implementation, the behavior is identical to sampling an Identity
augmentation 1/11th of the time. Defaults to `1.0`.
geometric: whether to include geometric augmentations. This
should be set to False when performing object detection. Defaults to
True.
Usage:
```python
(x_test, y_test), _ = keras.datasets.cifar10.load_data()
rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255), augmentations_per_image=3, magnitude=0.5
)
x_test = rand_augment(x_test)
```
"""
def __init__(
self,
value_range,
augmentations_per_image=3,
magnitude=0.5,
magnitude_stddev=0.15,
rate=10 / 11,
geometric=True,
seed=None,
**kwargs,
):
# As an optimization RandAugment makes all internal layers use (0, 255)
# and we handle range transformation at the _augment level.
if magnitude < 0.0 or magnitude > 1:
raise ValueError(
"`magnitude` must be in the range [0, 1], got "
f"`magnitude={magnitude}`"
)
if magnitude_stddev < 0.0 or magnitude_stddev > 1:
raise ValueError(
"`magnitude_stddev` must be in the range [0, 1], got "
f"`magnitude_stddev={magnitude_stddev}`"
)
super().__init__(
layers=RandAugment.get_standard_policy(
(0, 255),
magnitude,
magnitude_stddev,
geometric=geometric,
seed=seed,
),
augmentations_per_image=augmentations_per_image,
rate=rate,
**kwargs,
seed=seed,
)
self.magnitude = float(magnitude)
self.value_range = value_range
self.seed = seed
self.geometric = geometric
self.magnitude_stddev = float(magnitude_stddev)
def _augment(self, sample):
sample["images"] = preprocessing_utils.transform_value_range(
sample["images"], self.value_range, (0, 255)
)
result = super()._augment(sample)
result["images"] = preprocessing_utils.transform_value_range(
result["images"], (0, 255), self.value_range
)
return result
@staticmethod
def get_standard_policy(
value_range, magnitude, magnitude_stddev, geometric=True, seed=None
):
policy = create_rand_augment_policy(magnitude, magnitude_stddev)
auto_contrast = cv_preprocessing.AutoContrast(
**policy["auto_contrast"], value_range=value_range, seed=seed
)
equalize = cv_preprocessing.Equalization(
**policy["equalize"], value_range=value_range, seed=seed
)
solarize = cv_preprocessing.Solarization(
**policy["solarize"], value_range=value_range, seed=seed
)
color = cv_preprocessing.RandomColorDegeneration(
**policy["color"], seed=seed
)
contrast = cv_preprocessing.RandomContrast(
**policy["contrast"], value_range=value_range, seed=seed
)
brightness = cv_preprocessing.RandomBrightness(
**policy["brightness"], value_range=value_range, seed=seed
)
layers = [
auto_contrast,
equalize,
solarize,
color,
contrast,
brightness,
]
if geometric:
shear_x = cv_preprocessing.RandomShear(
**policy["shear_x"], seed=seed
)
shear_y = cv_preprocessing.RandomShear(
**policy["shear_y"], seed=seed
)
translate_x = cv_preprocessing.RandomTranslation(
**policy["translate_x"], seed=seed
)
translate_y = cv_preprocessing.RandomTranslation(
**policy["translate_y"], seed=seed
)
layers += [shear_x, shear_y, translate_x, translate_y]
return layers
def get_config(self):
config = super().get_config()
config.update(
{
"value_range": self.value_range,
"augmentations_per_image": self.augmentations_per_image,
"magnitude": self.magnitude,
"magnitude_stddev": self.magnitude_stddev,
"rate": self.rate,
"geometric": self.geometric,
"seed": self.seed,
}
)
# layers is recreated in the constructor
del config["layers"]
return config
def auto_contrast_policy(magnitude, magnitude_stddev):
return {}
def equalize_policy(magnitude, magnitude_stddev):
return {}
def solarize_policy(magnitude, magnitude_stddev):
# We cap additions at 110, because if we add more than 110 we will be nearly
# nullifying the information contained in the image, making the model train
# on noise
maximum_addition_value = 110
addition_factor = core.NormalFactorSampler(
mean=magnitude * maximum_addition_value,
stddev=magnitude_stddev * maximum_addition_value,
min_value=0,
max_value=maximum_addition_value,
)
threshold_factor = core.NormalFactorSampler(
mean=(255 - (magnitude * 255)),
stddev=(magnitude_stddev * 255),
min_value=0,
max_value=255,
)
return {
"addition_factor": addition_factor,
"threshold_factor": threshold_factor,
}
def color_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"factor": factor}
def contrast_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomContrast with `factor`?
# RandomContrast layer errors when factor=0
factor = max(magnitude, 0.001)
return {"factor": factor}
def brightness_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomBrightness with `factor`?
return {"factor": magnitude}
def shear_x_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"x_factor": factor, "y_factor": 0}
def shear_y_policy(magnitude, magnitude_stddev):
factor = core.NormalFactorSampler(
mean=magnitude,
stddev=magnitude_stddev,
min_value=0,
max_value=1,
)
return {"x_factor": 0, "y_factor": factor}
def translate_x_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomTranslation with `factor`?
return {"width_factor": magnitude, "height_factor": 0}
def translate_y_policy(magnitude, magnitude_stddev):
# TODO(lukewood): should we integrate RandomTranslation with `factor`?
return {"width_factor": 0, "height_factor": magnitude}
POLICY_PAIRS = {
"auto_contrast": auto_contrast_policy,
"equalize": equalize_policy,
"solarize": solarize_policy,
"color": color_policy,
"contrast": contrast_policy,
"brightness": brightness_policy,
"shear_x": shear_x_policy,
"shear_y": shear_y_policy,
"translate_x": translate_x_policy,
"translate_y": translate_y_policy,
}
def create_rand_augment_policy(magnitude, magnitude_stddev):
result = {}
for name, policy_fn in POLICY_PAIRS.items():
result[name] = policy_fn(magnitude, magnitude_stddev)
return result
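# Illustrative usage sketch: a hypothetical helper (not exported anywhere) that
# shows how `create_rand_augment_policy` and `RandAugment` fit together. The
# image shape, magnitude, and value range below are assumptions chosen only for
# demonstration.
def _rand_augment_usage_sketch():  # pragma: no cover - illustrative only
    import tensorflow as tf
    policy = create_rand_augment_policy(magnitude=0.3, magnitude_stddev=0.15)
    layer = RandAugment(
        value_range=(0, 255), augmentations_per_image=2, magnitude=0.3
    )
    images = tf.random.uniform((2, 64, 64, 3), maxval=255, dtype=tf.float32)
    return layer(images), policy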
| keras-cv/keras_cv/layers/preprocessing/rand_augment.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/rand_augment.py",
"repo_id": "keras-cv",
"token_count": 4559
} | 14 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomColorJitter")
class RandomColorJitter(VectorizedBaseImageAugmentationLayer):
"""RandomColorJitter class randomly apply brightness, contrast, saturation
and hue image processing operation sequentially and randomly on the
input. It expects input as RGB image. The expected image should be
`(0-255)` pixel ranges.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `channels_last` format
Args:
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
brightness_factor: Float or a list/tuple of 2 floats between -1.0
and 1.0. The factor is used to determine the lower bound and
upper bound of the brightness adjustment. A float value will be
chosen randomly between the limits. When -1.0 is chosen, the
output image will be black, and when 1.0 is chosen, the image
will be fully white. When only one float is provided, eg, 0.2,
then -0.2 will be used for lower bound and 0.2 will be used for
upper bound.
contrast_factor: A positive float represented as fraction of value,
or a tuple of size 2 representing lower and upper bound. When
represented as a single float, lower = upper. The contrast factor
will be randomly picked between `[1.0 - lower, 1.0 + upper]`.
saturation_factor: Either a tuple of two floats or a single float.
`factor` controls the extent to which the image saturation is
impacted. `factor=0.5` makes this layer perform a no-op operation.
            `factor=0.0` makes the image fully grayscale. `factor=1.0`
            makes the image fully saturated.
hue_factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
            image hue is impacted. `factor=0.0` makes this layer perform
            a no-op operation, while a value of 1.0 performs the most aggressive
            hue adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
color_jitter = keras_cv.layers.RandomColorJitter(
value_range=(0, 255),
brightness_factor=(-0.2, 0.5),
contrast_factor=(0.5, 0.9),
saturation_factor=(0.5, 0.9),
hue_factor=(0.5, 0.9),
)
augmented_images = color_jitter(images)
```
"""
def __init__(
self,
value_range,
brightness_factor,
contrast_factor,
saturation_factor,
hue_factor,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.value_range = value_range
self.brightness_factor = brightness_factor
self.contrast_factor = contrast_factor
self.saturation_factor = saturation_factor
self.hue_factor = hue_factor
self.seed = seed
self.random_brightness = preprocessing.RandomBrightness(
factor=self.brightness_factor, value_range=(0, 255), seed=self.seed
)
self.random_contrast = preprocessing.RandomContrast(
factor=self.contrast_factor, value_range=(0, 255), seed=self.seed
)
self.random_saturation = preprocessing.RandomSaturation(
factor=self.saturation_factor, seed=self.seed
)
self.random_hue = preprocessing.RandomHue(
factor=self.hue_factor, value_range=(0, 255), seed=self.seed
)
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations=None, **kwargs):
images = preprocessing_utils.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.random_brightness(images)
images = self.random_contrast(images)
images = self.random_saturation(images)
images = self.random_hue(images)
images = preprocessing_utils.transform_value_range(
images,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = super().get_config()
config.update(
{
"value_range": self.value_range,
"brightness_factor": self.brightness_factor,
"contrast_factor": self.contrast_factor,
"saturation_factor": self.saturation_factor,
"hue_factor": self.hue_factor,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
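# Illustrative usage sketch: a hypothetical helper showing the layer applied to
# a random batch. The image size and factor ranges are assumptions for
# demonstration only.
def _random_color_jitter_usage_sketch():  # pragma: no cover - illustrative only
    import tensorflow as tf
    images = tf.random.uniform((2, 32, 32, 3), maxval=255, dtype=tf.float32)
    layer = RandomColorJitter(
        value_range=(0, 255),
        brightness_factor=(-0.2, 0.2),
        contrast_factor=(0.4, 0.6),
        saturation_factor=(0.4, 0.6),
        hue_factor=(0.0, 0.2),
    )
    return layer(images)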
| keras-cv/keras_cv/layers/preprocessing/random_color_jitter.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_jitter.py",
"repo_id": "keras-cv",
"token_count": 2786
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomJpegQuality")
class RandomJpegQuality(BaseImageAugmentationLayer):
"""Applies Random Jpeg compression artifacts to an image.
Performs the jpeg compression algorithm on the image. This layer can be used
in order to ensure your model is robust to artifacts introduced by JPEG
compression.
Args:
factor: 2 element tuple or 2 element list. During augmentation, a random
number is drawn from the factor distribution. This value is passed to
`tf.image.adjust_jpeg_quality()`.
seed: Integer. Used to create a random seed.
Usage:
```python
    layer = keras_cv.RandomJpegQuality(factor=(75, 100))
(images, labels), _ = keras.datasets.cifar10.load_data()
augmented_images = layer(images)
```
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(**kwargs)
        if isinstance(factor, (float, int)):
            raise ValueError(
                "RandomJpegQuality() expects factor to be a 2 element "
                "tuple, list or a `keras_cv.FactorSampler`. "
                f"RandomJpegQuality() received `factor={factor}`."
            )
self.seed = seed
self.factor = preprocessing.parse_factor(
factor,
min_value=0,
max_value=100,
param_name="factor",
seed=self.seed,
)
def get_random_transformation(self, **kwargs):
return self.factor(dtype=tf.int32)
def augment_image(self, image, transformation=None, **kwargs):
jpeg_quality = transformation
return tf.image.adjust_jpeg_quality(image, jpeg_quality)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "seed": self.seed})
return config
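# Illustrative usage sketch: a hypothetical helper that compresses a random
# float batch in [0, 1). The batch shape and quality range are assumptions for
# demonstration only.
def _random_jpeg_quality_usage_sketch():  # pragma: no cover - illustrative only
    images = tf.random.uniform((2, 64, 64, 3))  # float values in [0, 1)
    layer = RandomJpegQuality(factor=(50, 100))
    return layer(images)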
| keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_jpeg_quality.py",
"repo_id": "keras-cv",
"token_count": 1087
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axis is reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.Rescaling")
class Rescaling(BaseImageAugmentationLayer):
"""A preprocessing layer which rescales input values to a new range.
This layer rescales every value of an input (often an image) by multiplying
by `scale` and adding `offset`.
For instance:
1. To rescale an input in the ``[0, 255]`` range
to be in the `[0, 1]` range, you would pass `scale=1./255`.
2. To rescale an input in the ``[0, 255]`` range to be in the `[-1, 1]`
range, you would pass `scale=1./127.5, offset=-1`.
Inputs can be of integer or floating point dtype, and by default the layer
will output floats.
Input shape:
Arbitrary.
Output shape:
Same as input.
Args:
scale: Float, the scale to apply to the inputs.
offset: Float, the offset to apply to the inputs.
"""
def __init__(self, scale, offset=0.0, **kwargs):
super().__init__(**kwargs)
self.scale = scale
self.offset = offset
def augment_image(self, image, transformation, **kwargs):
dtype = self.compute_dtype
scale = tf.cast(self.scale, dtype)
offset = tf.cast(self.offset, dtype)
return tf.cast(image, dtype) * scale + offset
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def get_config(self):
config = {
"scale": self.scale,
"offset": self.offset,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
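# Illustrative usage sketch: a hypothetical helper mapping a [0, 255] batch into
# [-1, 1], mirroring the second example in the class docstring. Shapes are
# assumptions for demonstration only.
def _rescaling_usage_sketch():  # pragma: no cover - illustrative only
    images = tf.random.uniform((2, 8, 8, 3), maxval=255, dtype=tf.float32)
    layer = Rescaling(scale=1.0 / 127.5, offset=-1.0)
    return layer(images)  # output values lie in [-1, 1]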
| keras-cv/keras_cv/layers/preprocessing/rescaling.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/rescaling.py",
"repo_id": "keras-cv",
"token_count": 975
} | 17 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/__init__.py",
"repo_id": "keras-cv",
"token_count": 65
} | 18 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import os
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.group_points_by_bounding_boxes import ( # noqa: E501
GroupPointsByBoundingBoxes,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS
OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES
class GroupPointsByBoundingBoxesTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
).astype("float32")
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy_item": np.random.uniform(size=(2, 2, 2)),
}
outputs = add_layer(inputs)
object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
).astype("float32")
object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]]] * 2
).astype("float32")
self.assertAllClose(inputs[POINT_CLOUDS], outputs[POINT_CLOUDS])
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
self.assertAllClose(inputs["dummy_item"], outputs["dummy_item"])
        # Sort the point clouds because the order of points can differ between
        # TensorFlow and Metal+TensorFlow (MAC).
outputs[OBJECT_POINT_CLOUDS] = tf.sort(
outputs[OBJECT_POINT_CLOUDS], axis=-2
)
object_point_clouds = tf.sort(object_point_clouds, axis=-2)
self.assertAllClose(outputs[OBJECT_POINT_CLOUDS], object_point_clouds)
self.assertAllClose(
outputs[OBJECT_BOUNDING_BOXES], object_bounding_boxes
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
]
* 3
).astype("float32")
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
]
* 3
).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
* 3
]
* 2
).astype("float32")
object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]] * 3] * 2
).astype("float32")
self.assertAllClose(inputs[POINT_CLOUDS], outputs[POINT_CLOUDS])
self.assertAllClose(inputs[BOUNDING_BOXES], outputs[BOUNDING_BOXES])
        # Sort the point clouds because the order of points can differ between
        # TensorFlow and Metal+TensorFlow (MAC).
outputs[OBJECT_POINT_CLOUDS] = tf.sort(
outputs[OBJECT_POINT_CLOUDS], axis=-2
)
object_point_clouds = tf.sort(object_point_clouds, axis=-2)
self.assertAllClose(outputs[OBJECT_POINT_CLOUDS], object_point_clouds)
self.assertAllClose(
outputs[OBJECT_BOUNDING_BOXES], object_bounding_boxes
)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_augment_point_clouds_and_bounding_boxes_v2(self):
add_layer = GroupPointsByBoundingBoxes(
label_index=1,
min_points_per_bounding_boxes=1,
max_points_per_bounding_boxes=2,
)
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
]
]
* 2
).astype("float32")
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[10, 1, 2, 2, 2, 2, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
]
]
* 2
).astype("float32")
point_clouds = tf.convert_to_tensor(point_clouds)
bounding_boxes = tf.convert_to_tensor(bounding_boxes)
outputs = add_layer.augment_point_clouds_bounding_boxes_v2(
point_clouds=point_clouds, bounding_boxes=bounding_boxes
)
object_point_clouds, object_bounding_boxes = outputs[0], outputs[1]
expected_object_point_clouds = np.array(
[
[
[[0, 1, 2, 3, 4], [0, -1, 2, 3, 4]],
[[10, 1, 2, 3, 4], [0, 0, 0, 0, 0]],
]
]
* 2
).astype("float32")
expected_object_bounding_boxes = np.array(
[[[0, 0, 0, 4, 4, 4, 0, 1], [10, 1, 2, 2, 2, 2, 0, 1]]] * 2
).astype("float32")
self.assertAllClose(
expected_object_point_clouds, object_point_clouds.to_tensor()
)
self.assertAllClose(
expected_object_bounding_boxes, object_bounding_boxes.to_tensor()
)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes_test.py",
"repo_id": "keras-cv",
"token_count": 4159
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.SegFormerMultiheadAttention")
class SegFormerMultiheadAttention(keras.layers.Layer):
def __init__(self, project_dim, num_heads, sr_ratio):
"""
Efficient MultiHeadAttention implementation as a Keras layer.
A huge bottleneck in scaling transformers is the self-attention layer
with an O(n^2) complexity.
        SegFormerMultiheadAttention performs a sequence reduction (SR) operation
        with a given ratio to shorten the sequence before the key and value
        projections, reducing the O(n^2) complexity to O(n^2/R), where R is the
        sequence reduction ratio.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021) # noqa: E501
- [NVlabs' official implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py) # noqa: E501
- [@sithu31296's reimplementation](https://github.com/sithu31296/semantic-segmentation/blob/main/semseg/models/backbones/mit.py) # noqa: E501
- [Ported from the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/layers/efficient_attention.py) # noqa: E501
Args:
project_dim: integer, the dimensionality of the projection
of the `SegFormerMultiheadAttention` layer.
num_heads: integer, the number of heads to use in the
attention computation.
sr_ratio: integer, the sequence reduction ratio to perform
on the sequence before key and value projections.
Basic usage:
```
tensor = tf.random.uniform([1, 196, 32])
output = keras_cv.layers.SegFormerMultiheadAttention(project_dim=768,
num_heads=2,
sr_ratio=4)(tensor)
print(output.shape) # (1, 196, 32)
```
"""
super().__init__()
self.num_heads = num_heads
self.sr_ratio = sr_ratio
self.scale = (project_dim // num_heads) ** -0.5
self.q = keras.layers.Dense(project_dim)
self.k = keras.layers.Dense(project_dim)
self.v = keras.layers.Dense(project_dim)
self.proj = keras.layers.Dense(project_dim)
if sr_ratio > 1:
self.sr = keras.layers.Conv2D(
filters=project_dim,
kernel_size=sr_ratio,
strides=sr_ratio,
padding="same",
)
self.norm = keras.layers.LayerNormalization()
def call(self, x):
input_shape = ops.shape(x)
H, W = int(math.sqrt(input_shape[1])), int(math.sqrt(input_shape[1]))
B, C = input_shape[0], input_shape[2]
q = self.q(x)
q = ops.reshape(
q,
(
input_shape[0],
input_shape[1],
self.num_heads,
input_shape[2] // self.num_heads,
),
)
q = ops.transpose(q, [0, 2, 1, 3])
if self.sr_ratio > 1:
x = ops.reshape(
ops.transpose(x, [0, 2, 1]),
(B, H, W, C),
)
x = self.sr(x)
x = ops.reshape(x, [input_shape[0], input_shape[2], -1])
x = ops.transpose(x, [0, 2, 1])
x = self.norm(x)
k = self.k(x)
v = self.v(x)
k = ops.transpose(
ops.reshape(
k,
[B, -1, self.num_heads, C // self.num_heads],
),
[0, 2, 1, 3],
)
v = ops.transpose(
ops.reshape(
v,
[B, -1, self.num_heads, C // self.num_heads],
),
[0, 2, 1, 3],
)
attn = (q @ ops.transpose(k, [0, 1, 3, 2])) * self.scale
attn = ops.nn.softmax(attn, axis=-1)
attn = attn @ v
attn = ops.reshape(
ops.transpose(attn, [0, 2, 1, 3]),
[input_shape[0], input_shape[1], input_shape[2]],
)
x = self.proj(attn)
return x
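# Illustrative usage sketch: a hypothetical helper running the layer on a dummy
# flattened 14x14 feature map (196 tokens). The projection size, head count and
# reduction ratio are assumptions for demonstration only.
def _segformer_attention_usage_sketch():  # pragma: no cover - illustrative only
    tokens = ops.ones((1, 196, 32))
    layer = SegFormerMultiheadAttention(project_dim=32, num_heads=2, sr_ratio=4)
    return layer(tokens)  # shape (1, 196, 32)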
| keras-cv/keras_cv/layers/segformer_multihead_attention.py/0 | {
"file_path": "keras-cv/keras_cv/layers/segformer_multihead_attention.py",
"repo_id": "keras-cv",
"token_count": 2401
} | 20 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.losses import FocalLoss
from keras_cv.tests.test_case import TestCase
class FocalTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = FocalLoss(reduction="sum")
self.assertAllEqual(focal_loss(y_true, y_pred).shape, [])
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=0, high=1)
focal_loss = FocalLoss(reduction="none")
self.assertAllEqual(
focal_loss(y_true, y_pred).shape,
[
2,
],
)
def test_output_shape_from_logits(self):
y_true = np.random.uniform(size=[2, 5], low=0, high=1)
y_pred = np.random.uniform(size=[2, 5], low=-10, high=10)
focal_loss = FocalLoss(reduction="none", from_logits=True)
self.assertAllEqual(
focal_loss(y_true, y_pred).shape,
[
2,
],
)
def test_from_logits_argument(self):
rng = np.random.default_rng(1337)
y_true = rng.uniform(size=(2, 8, 10)).astype("float64")
y_logits = rng.uniform(low=-1000, high=1000, size=(2, 8, 10)).astype(
"float64"
)
y_pred = ops.cast(ops.sigmoid(y_logits), "float32")
focal_loss_on_logits = FocalLoss(from_logits=True)
focal_loss = FocalLoss()
# TODO(ianstenbit): This probably warrants some more investigation.
# In the current implementation, I've verified that training RetinaNet
# works in all backends with this implementation.
# TF backend somehow has different numerics.
expected_loss = (
31.11176
if keras_3() and keras.backend.backend() != "tensorflow"
else 925.28081
)
self.assertAllClose(
focal_loss_on_logits(y_true, y_logits), expected_loss
)
self.assertAllClose(focal_loss(y_true, y_pred), 31.11176)
| keras-cv/keras_cv/losses/focal_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/focal_test.py",
"repo_id": "keras-cv",
"token_count": 1246
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.densenet.densenet_backbone import (
DenseNetBackbone,
)
from keras_cv.models.backbones.densenet.densenet_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """DenseNetBackbone model with {num_layers} layers.
Reference:
- [Densely Connected Convolutional Networks (CVPR 2017)](https://arxiv.org/abs/1608.06993)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = DenseNet{num_layers}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.DenseNet121Backbone")
class DenseNet121Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet121", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet121_imagenet": copy.deepcopy(
backbone_presets["densenet121_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
@keras_cv_export("keras_cv.models.DenseNet169Backbone")
class DenseNet169Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet169", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet169_imagenet": copy.deepcopy(
backbone_presets["densenet169_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
@keras_cv_export("keras_cv.models.DenseNet201Backbone")
class DenseNet201Backbone(DenseNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return DenseNetBackbone.from_preset("densenet201", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"densenet201_imagenet": copy.deepcopy(
backbone_presets["densenet201_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include weights.""" # noqa: E501
return cls.presets
setattr(DenseNet121Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=121))
setattr(DenseNet169Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=169))
setattr(DenseNet201Backbone, "__doc__", ALIAS_DOCSTRING.format(num_layers=201))
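# Illustrative usage sketch: a hypothetical helper showing that instantiating an
# alias is equivalent to calling `DenseNetBackbone.from_preset` as done above.
# The input size is an assumption; building the bare preset fetches only its
# configuration, not pretrained weights.
def _densenet_alias_usage_sketch():  # pragma: no cover - illustrative only
    import numpy as np
    model = DenseNet121Backbone(include_rescaling=True)
    features = model(np.ones((1, 224, 224, 3), dtype="float32"))
    return features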
| keras-cv/keras_cv/models/backbones/densenet/densenet_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/densenet/densenet_aliases.py",
"repo_id": "keras-cv",
"token_count": 2183
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone import (
EfficientNetV1Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class EfficientNetV1BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_alias_model_with_rescaling(self):
model = EfficientNetV1B0Backbone(include_rescaling=True)
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, EfficientNetV1Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = EfficientNetV1B0Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, EfficientNetV1Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(model_output, restored_output)
def test_feature_pyramid_inputs(self):
model = EfficientNetV1B0Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P1", "P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P1"].shape,
(None, input_size // 2**1, input_size // 2**1, 16),
)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 24),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 40),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 112),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1280),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
            input_shape=(None, None, num_channels),
            include_rescaling=True,
)
self.assertEqual(model.output_shape, (None, None, None, 1280))
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 3658
} | 23 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV3 model preset configurations."""
backbone_presets_no_weights = {
"mobilenet_v3_small": {
"metadata": {
"description": (
"MobileNetV3 model with 14 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers."
),
"params": 933502,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_small/2", # noqa: E501
},
"mobilenet_v3_large": {
"metadata": {
"description": (
"MobileNetV3 model with 28 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers."
),
"params": 2994518,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large/2", # noqa: E501
},
}
backbone_presets_with_weights = {
"mobilenet_v3_large_imagenet": {
"metadata": {
"description": (
"MobileNetV3 model with 28 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers. "
"Pre-trained on the ImageNet 2012 classification task."
),
"params": 2994518,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large_imagenet/2", # noqa: E501
},
"mobilenet_v3_small_imagenet": {
"metadata": {
"description": (
"MobileNetV3 model with 14 layers where the batch "
"normalization and hard-swish activation are applied after the "
"convolution layers. "
"Pre-trained on the ImageNet 2012 classification task."
),
"params": 933502,
"official_name": "MobileNetV3",
"path": "mobilenetv3",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_small_imagenet/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
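# Illustrative usage sketch: a hypothetical helper listing the preset names
# defined above; consumers such as a backbone's `from_preset` look presets up
# by these keys.
def _list_mobilenet_v3_presets():  # pragma: no cover - illustrative only
    return sorted(backbone_presets.keys())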
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1380
} | 24 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import keras
from keras_cv.backend import ops
def get_initializer(initializer_range=0.02):
"""
Creates a `keras.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (*float*, defaults to 0.02): Standard deviation of the
initializer range.
Returns:
`keras.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return keras.initializers.TruncatedNormal(stddev=initializer_range)
class QuickGELU(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, x):
return x * ops.sigmoid(1.702 * x)
class ResidualAttention(keras.layers.Layer):
def __init__(
self,
proj_dim,
num_heads,
num_hidden_layers,
**kwargs,
):
super().__init__(**kwargs)
self.proj_dim = proj_dim
self.num_heads = num_heads
self.num_hidden_layers = num_hidden_layers
self.fc_std = np.power(2 * self.proj_dim, -0.5) * 0.02
self.in_proj_std = (
np.power(self.proj_dim, -0.5)
* (np.power(2 * self.num_hidden_layers, -0.5))
* 0.02
)
self.attn = CLIPAttention(
self.proj_dim,
self.num_heads,
self.num_hidden_layers,
name="multi_head_attention",
)
self.ln_1 = keras.layers.LayerNormalization(epsilon=1e-5, name="ln_1")
self.mlp_dense_1 = keras.layers.Dense(
self.proj_dim * 4,
name="c_fc",
)
self.mlp_activation = QuickGELU(name="gelu")
self.mlp_dense_2 = keras.layers.Dense(
self.proj_dim,
name="c_proj",
)
self.ln_2 = keras.layers.LayerNormalization(epsilon=1e-5, name="ln_2")
    def attention(self, x, causal_attention_mask=None, attention_mask=None):
        # Combine the optional causal mask and padding mask into a single
        # additive attention mask, cast to the input dtype.
        mask = None
        if causal_attention_mask is not None:
            mask = ops.cast(causal_attention_mask, dtype=x.dtype)
        if attention_mask is not None:
            attention_mask = ops.cast(attention_mask, dtype=x.dtype)
            mask = (
                attention_mask
                if mask is None
                else ops.add(mask, attention_mask)
            )
        return self.attn(
            x,
            attention_mask=mask,
        )[0]
def build(self, input_shape):
super().build(input_shape)
self.attn.build(None)
self.ln_1.build([None, None, self.proj_dim])
self.mlp_dense_1.build([None, None, self.proj_dim])
self.mlp_dense_2.build([None, None, self.proj_dim * 4])
self.ln_2.build([None, None, self.proj_dim])
def call(self, x, causal_attention_mask=None, attention_mask=None):
residual = x
x = self.ln_1(x)
x = self.attention(
x,
causal_attention_mask=causal_attention_mask,
attention_mask=attention_mask,
)
x = x + residual
residual = x
x = self.mlp_dense_1(self.ln_2(residual))
x = self.mlp_activation(x)
x = self.mlp_dense_2(x)
x = residual + x
return x
def compute_output_shape(self, inputs_shape):
return inputs_shape
def get_config(self):
config = super().get_config()
config.update(
{
"proj_dim": self.proj_dim,
"num_heads": self.num_heads,
"num_hidden_layers": self.num_hidden_layers,
}
)
return config
class CLIPEncoder(keras.layers.Layer):
def __init__(self, width, num_layers, heads, **kwargs):
super().__init__(**kwargs)
self.width = width
self.num_layers = num_layers
self.heads = heads
self.resblocks = [
ResidualAttention(
self.width,
self.heads,
self.num_layers,
)
for _ in range(self.num_layers)
]
def build(self, input_shape):
super().build(input_shape)
for block in self.resblocks:
block.build(input_shape)
def call(
self,
x,
causal_attention_mask=None,
attention_mask=None,
):
for block in self.resblocks:
x = block(
x,
causal_attention_mask=causal_attention_mask,
attention_mask=attention_mask,
)
return x
def compute_output_shape(self, inputs_shape):
return inputs_shape
def get_config(self):
config = super().get_config()
config.update(
{
"width": self.width,
"num_layers": self.num_layers,
"heads": self.heads,
}
)
return config
class CLIPAttention(keras.layers.Layer):
"""
Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/modeling_clip.py # noqa: E501
"""
def __init__(
self, proj_dim, num_heads, num_hidden_layers, dropout=0.0, **kwargs
):
super().__init__(**kwargs)
self.proj_dim = proj_dim
self.num_heads = num_heads
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.head_dim = self.proj_dim // self.num_heads
if self.head_dim * self.num_heads != self.proj_dim:
raise ValueError(
f"proj_dim must be divisible by num_heads (got `proj_dim`"
f": {self.proj_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
in_proj_std = (
(self.proj_dim**-0.5)
* ((2 * self.num_hidden_layers) ** -0.5)
* 0.02
)
out_proj_std = (self.proj_dim**-0.5) * 0.02
self.q_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="q_proj",
)
self.k_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="k_proj",
)
self.v_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="v_proj",
)
self.out_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(out_proj_std),
name="out_proj",
)
def build(self, input_shape):
super().build(input_shape)
self.q_proj.build([None, None, self.proj_dim])
self.k_proj.build([None, None, self.proj_dim])
self.v_proj.build([None, None, self.proj_dim])
self.out_proj.build([None, None, self.proj_dim])
def _transpose_for_scores(self, tensor, batch_size):
"""
Adapted from https://github.com/huggingface/transformers/blob/8e164c5400b7b413c7b8fb32e35132001effc970/src/transformers/models/bert/modeling_tf_bert.py#L252 # noqa: E501
"""
# [batch_size, seq_len, all_head_dim] ->
# [batch_size, seq_len, num_heads, head_dim]
tensor = ops.reshape(
tensor, (batch_size, -1, self.num_heads, self.head_dim)
)
# [batch_size, seq_len, num_heads, head_dim] ->
# [batch_size, num_heads, seq_len, head_dim]
return ops.transpose(tensor, axes=[0, 2, 1, 3])
def call(
self,
x,
attention_mask=None,
output_attentions=None,
training=False,
):
batch_size = ops.shape(x)[0]
mixed_query_layer = self.q_proj(inputs=x)
mixed_key_layer = self.k_proj(inputs=x)
mixed_value_layer = self.v_proj(inputs=x)
query_layer = self._transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self._transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self._transpose_for_scores(mixed_value_layer, batch_size)
# Scaled dot product between key and query = raw attention scores.
attention_scores = ops.matmul(
query_layer, ops.transpose(key_layer, axes=[0, 1, 3, 2])
)
dk = ops.cast(ops.sqrt(self.head_dim), dtype=attention_scores.dtype)
attention_scores = ops.divide(
attention_scores, dk
) # (batch_size, num_heads, seq_len_q, seq_len_k)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the
# call() function)
attention_scores = ops.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = ops.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
dropout_attention_probs = keras.layers.Dropout(self.dropout)(
inputs=attention_probs, training=training
)
attn_output = ops.matmul(dropout_attention_probs, value_layer)
attn_output = ops.transpose(attn_output, axes=[0, 2, 1, 3])
# (batch_size, seq_len_q, proj_dim)
attn_output = ops.reshape(attn_output, (batch_size, -1, self.proj_dim))
attn_output = self.out_proj(attn_output, training=training)
outputs = (
(attn_output, attention_probs)
if output_attentions
else (attn_output,)
)
return outputs
def get_config(self):
config = super().get_config()
config.update(
{
"proj_dim": self.proj_dim,
"num_heads": self.num_heads,
"num_hidden_layers": self.num_hidden_layers,
"dropout": self.dropout,
}
)
return config
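# Illustrative usage sketch: a hypothetical helper stacking the encoder on a
# dummy token sequence. The width, depth, head count and sequence length are
# assumptions for demonstration only.
def _clip_encoder_usage_sketch():  # pragma: no cover - illustrative only
    sequence = ops.ones((1, 77, 64))  # (batch, tokens, width)
    encoder = CLIPEncoder(width=64, num_layers=2, heads=4)
    return encoder(sequence)  # shape (1, 77, 64)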
| keras-cv/keras_cv/models/feature_extractor/clip/clip_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_encoder.py",
"repo_id": "keras-cv",
"token_count": 5332
} | 25 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLP Mixer models for KerasCV.
Reference:
- [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
MODEL_CONFIGS = {
"MLPMixerB16": {
"patch_size": 16,
"num_blocks": 12,
"hidden_dim": 768,
"tokens_mlp_dim": 384,
"channels_mlp_dim": 3072,
},
"MLPMixerB32": {
"patch_size": 32,
"num_blocks": 12,
"hidden_dim": 768,
"tokens_mlp_dim": 384,
"channels_mlp_dim": 3072,
},
"MLPMixerL16": {
"patch_size": 16,
"num_blocks": 24,
"hidden_dim": 1024,
"tokens_mlp_dim": 512,
"channels_mlp_dim": 4096,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)
This class represents a Keras {name} model.
For transfer learning use cases, make sure to read the [guide to transfer
learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, num_classes must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
name: string, optional name to pass to the model, defaults to "{name}".
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
Returns:
A `keras.Model` instance.
""" # noqa: E501
def apply_mlp_block(x, mlp_dim, name=None):
"""An MLP block consisting of two linear layers with GELU activation in
between.
Args:
x: input tensor.
mlp_dim: integer, the number of units to be present in the first layer.
name: string, block label.
Returns:
the updated input tensor.
"""
if name is None:
name = f"mlp_block_{backend.get_uid('mlp_block')}"
y = layers.Dense(mlp_dim, name=f"{name}_dense_1")(x)
y = layers.Activation("gelu", name=f"{name}_gelu")(y)
return layers.Dense(x.shape[-1], name=f"{name}_dense_2")(y)
def apply_mixer_block(x, tokens_mlp_dim, channels_mlp_dim, name=None):
"""A mixer block.
Args:
x: input tensor.
tokens_mlp_dim: integer, number of units to be present in the MLP block
dealing with tokens.
channels_mlp_dim: integer, number of units to be present in the MLP block
dealing with channels.
name: string, block label.
Returns:
the updated input tensor.
"""
if name is None:
name = f"mixer_block_{backend.get_uid('mlp_block')}"
y = layers.LayerNormalization()(x)
y = layers.Permute((2, 1))(y)
y = apply_mlp_block(y, tokens_mlp_dim, name=f"{name}_token_mixing")
y = layers.Permute((2, 1))(y)
x = layers.Add()([x, y])
y = layers.LayerNormalization()(x)
y = apply_mlp_block(y, channels_mlp_dim, name=f"{name}_channel_mixing")
return layers.Add()([x, y])
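# Illustrative usage sketch: a hypothetical helper wiring the two blocks above
# into a tiny functional model. The token count and hidden size are assumptions
# for demonstration only.
def _mixer_block_usage_sketch():  # pragma: no cover - illustrative only
    tokens = keras.Input(shape=(196, 768))  # (num_patches, hidden_dim)
    mixed = apply_mixer_block(
        tokens, tokens_mlp_dim=384, channels_mlp_dim=3072, name="demo_block"
    )
    return keras.Model(tokens, mixed)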
@keras.utils.register_keras_serializable(package="keras_cv.models")
class MLPMixer(keras.Model):
"""Instantiates the MLP Mixer architecture.
Args:
input_shape: tuple denoting the input shape, (224, 224, 3) for example.
patch_size: integer denoting the size of the patches to be extracted
from the inputs (16 for extracting 16x16 patches for example).
num_blocks: integer, number of mixer blocks.
hidden_dim: integer, dimension to which the patches will be linearly
projected.
tokens_mlp_dim: integer, dimension of the MLP block responsible for
tokens.
channels_mlp_dim: integer, dimension of the MLP block responsible for
channels.
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected
layer at the top of the network. If provided, num_classes must be
provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization) or a pretrained
weight file path.
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: string, optional name to pass to the model, defaults to "MLPMixer".
Returns:
A `keras.Model` instance.
"""
def __init__(
self,
input_shape,
patch_size,
num_blocks,
hidden_dim,
tokens_mlp_dim,
channels_mlp_dim,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
classifier_activation="softmax",
name="MLPMixer",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either "
"`None` or the path to the weights file to be loaded. "
f"Weights file not found at location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, "
"you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if not isinstance(input_shape, tuple):
            raise ValueError("`input_shape` needs to be a tuple.")
if len(input_shape) != 3:
raise ValueError(
"`input_shape` needs to contain dimensions for three"
" axes: height, width, and channel ((224, 224, 3) for example)."
)
if input_shape[0] != input_shape[1]:
raise ValueError("Non-uniform resolutions are not supported.")
if input_shape[0] % patch_size != 0:
raise ValueError(
"Input resolution should be divisible by the patch size."
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = layers.Conv2D(
filters=hidden_dim,
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
padding="valid",
name="patchify_and_projection",
)(x)
x = layers.Reshape((x.shape[1] * x.shape[2], x.shape[3]))(x)
for i in range(num_blocks):
x = apply_mixer_block(
x, tokens_mlp_dim, channels_mlp_dim, name=f"mixer_block_{i}"
)
x = layers.LayerNormalization()(x)
if include_top:
x = layers.GlobalAveragePooling1D(name="avg_pool")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
elif pooling == "avg":
x = layers.GlobalAveragePooling1D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling1D(name="max_pool")(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.patch_size = patch_size
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"input_shape": self.input_shape[1:],
"patch_size": self.patch_size,
"num_blocks": self.num_blocks,
"hidden_dim": self.hidden_dim,
"tokens_mlp_dim": self.tokens_mlp_dim,
"channels_mlp_dim": self.channels_mlp_dim,
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"num_classes": self.num_classes,
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"classifier_activation": self.classifier_activation,
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def MLPMixerB16(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerB16",
**kwargs,
):
"""Instantiates the MLPMixerB16 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerB16"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerB16"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerB16"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerB16"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerB16"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
def MLPMixerB32(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerB32",
**kwargs,
):
"""Instantiates the MLPMixerB32 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerB32"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerB32"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerB32"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerB32"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerB32"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
def MLPMixerL16(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerL16",
**kwargs,
):
"""Instantiates the MLPMixerL16 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerL16"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerL16"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerL16"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerL16"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerL16"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
setattr(MLPMixerB16, "__doc__", BASE_DOCSTRING.format(name="MLPMixerB16"))
setattr(MLPMixerB32, "__doc__", BASE_DOCSTRING.format(name="MLPMixerB32"))
setattr(MLPMixerL16, "__doc__", BASE_DOCSTRING.format(name="MLPMixerL16"))
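
# Illustrative smoke test for the functional constructors above. This is a
# hedged sketch, not part of the library API: the input resolution and class
# count are arbitrary assumptions, chosen so that the square resolution is
# divisible by the 16x16 patch size used by MLPMixerB16.
if __name__ == "__main__":
    model = MLPMixerB16(
        input_shape=(224, 224, 3),
        include_rescaling=True,
        include_top=True,
        num_classes=10,
    )
    images = tf.random.uniform((1, 224, 224, 3), maxval=255.0)
    print(model(images).shape)  # expected: (1, 10)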
| keras-cv/keras_cv/models/legacy/mlp_mixer.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/mlp_mixer.py",
"repo_id": "keras-cv",
"token_count": 6411
} | 26 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT (Vision Transformer) models for Keras.
Reference:
- [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929v2)
(ICLR 2021)
- [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
(CoRR 2021)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.layers import TransformerEncoder
from keras_cv.layers.vit_layers import PatchingAndEmbedding
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
MODEL_CONFIGS = {
"ViTTiny16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 192,
"mlp_dim": 768,
"num_heads": 3,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTS16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 384,
"mlp_dim": 1536,
"num_heads": 6,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTB16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTL16": {
"patch_size": 16,
"transformer_layer_num": 24,
"project_dim": 1024,
"mlp_dim": 4096,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTH16": {
"patch_size": 16,
"transformer_layer_num": 32,
"project_dim": 1280,
"mlp_dim": 5120,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTTiny32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 192,
"mlp_dim": 768,
"num_heads": 3,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTS32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 384,
"mlp_dim": 1536,
"num_heads": 6,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTB32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTL32": {
"patch_size": 32,
"transformer_layer_num": 24,
"project_dim": 1024,
"mlp_dim": 4096,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTH32": {
"patch_size": 32,
"transformer_layer_num": 32,
"project_dim": 1280,
"mlp_dim": 5120,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929v2)
(ICLR 2021)
This function returns a Keras {name} model.
    The naming convention of ViT models follows ViT<Size><PatchSize>
    (e.g., ViTS16).
The following sizes were released in the original paper:
- S (Small)
- B (Base)
- L (Large)
But subsequent work from the same authors introduced:
- Ti (Tiny)
- H (Huge)
The parameter configurations for all of these sizes, at patch sizes 16 and
32 are made available, following the naming convention laid out above.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1./255.0)`
layer. Note that ViTs expect an input range of `[0..1]` if rescaling
isn't used. Regardless of whether you supply `[0..1]` or the input
is rescaled to `[0..1]`, the inputs will further be rescaled to
`[-1..1]`.
    include_top: bool, whether to include the fully-connected layer at the
        top of the network. If True, `num_classes` must be provided.
num_classes: optional int, number of classes to classify images into,
only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights
(e.g. 'imagenet/classification') (see available pre-trained weights
in weights.py). Note that the 'imagenet' weights only work on an
input shape of (224, 224, 3) due to the input shape dependent
patching and flattening logic.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
        - `token_pooling` means that the token at the start of the
            sequences is used instead of regular pooling.
name: (Optional) name to pass to the model, defaults to "{name}".
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
Returns:
A `keras.Model` instance.
""" # noqa: E501
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ViT(keras.Model):
"""Instantiates the ViT architecture.
Args:
mlp_dim: the dimensionality of the hidden Dense layer in the transformer
MLP head
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
name: string, model name.
include_top: bool, whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
or the path to the weights file to be loaded.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
            - `token_pooling` means that the token at the start of the
                sequences is used instead of regular pooling.
num_classes: optional number of classes to classify images
into, only to be specified if `include_top` is True.
project_dim: the latent dimensionality to be projected into in the
output of each stacked transformer encoder
activation: the activation function to use in the first `layers.Dense`
layer in the MLP head of the transformer encoder
attention_dropout: the dropout rate to apply to the `MultiHeadAttention`
in each transformer encoder
mlp_dropout: the dropout rate to apply between `layers.Dense` layers
in the MLP head of the transformer encoder
num_heads: the number of heads to use in the `MultiHeadAttention` layer
of each transformer encoder
transformer_layer_num: the number of transformer encoder layers to stack
in the Vision Transformer
patch_size: the patch size to be supplied to the Patching layer to turn
input images into a flattened sequence of patches
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
**kwargs: Pass-through keyword arguments to `keras.Model`.
"""
def __init__(
self,
include_rescaling,
include_top,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
patch_size=None,
transformer_layer_num=None,
num_heads=None,
mlp_dropout=None,
attention_dropout=None,
activation=None,
project_dim=None,
mlp_dim=None,
classifier_activation="softmax",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path "
"to the weights file to be loaded. Weights file not found at "
"location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1.0 / 255.0, name="rescaling")(x)
# The previous layer rescales [0..255] to [0..1] if applicable
# This one rescales [0..1] to [-1..1] since ViTs expect [-1..1]
x = layers.Rescaling(scale=1.0 / 0.5, offset=-1.0, name="rescaling_2")(
x
)
encoded_patches = PatchingAndEmbedding(project_dim, patch_size)(x)
encoded_patches = layers.Dropout(mlp_dropout)(encoded_patches)
for _ in range(transformer_layer_num):
encoded_patches = TransformerEncoder(
project_dim=project_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
activation=activation,
)(encoded_patches)
output = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
if include_top:
output = output[:, 0]
output = layers.Dense(
num_classes, activation=classifier_activation
)(output)
elif pooling == "token_pooling":
output = output[:, 0]
elif pooling == "avg":
output = layers.GlobalAveragePooling1D()(output)
# Create model.
super().__init__(inputs=inputs, outputs=output, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.patch_size = patch_size
self.transformer_layer_num = transformer_layer_num
self.num_heads = num_heads
self.mlp_dropout = mlp_dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"patch_size": self.patch_size,
"transformer_layer_num": self.transformer_layer_num,
"num_heads": self.num_heads,
"mlp_dropout": self.mlp_dropout,
"attention_dropout": self.attention_dropout,
"activation": self.activation,
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def ViTTiny16(
*,
include_rescaling,
include_top,
name="ViTTiny16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTTiny16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vittiny16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTTiny16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTTiny16"][
"transformer_layer_num"
],
project_dim=MODEL_CONFIGS["ViTTiny16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTTiny16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTTiny16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTTiny16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTTiny16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTS16(
*,
include_rescaling,
include_top,
name="ViTS16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTS16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vits16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTS16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTS16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTS16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTS16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTS16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTS16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTB16(
*,
include_rescaling,
include_top,
name="ViTB16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTB16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitb16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTB16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTB16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTB16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTB16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTB16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTB16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTL16(
*,
include_rescaling,
include_top,
name="ViTL16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTL16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitl16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTL16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTL16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTL16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTL16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTL16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTL16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTL16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTH16(
*,
include_rescaling,
include_top,
name="ViTH16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTH16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTH16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTH16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTH16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTH16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTH16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTH16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTH16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTTiny32(
*,
include_rescaling,
include_top,
name="ViTTiny32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTTiny32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTTiny32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTTiny32"][
"transformer_layer_num"
],
project_dim=MODEL_CONFIGS["ViTTiny32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTTiny32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTTiny32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTTiny32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTTiny32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTS32(
*,
include_rescaling,
include_top,
name="ViTS32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTS32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vits32"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTS32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTS32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTS32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTS32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTS32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTS32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTS32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTB32(
*,
include_rescaling,
include_top,
name="ViTB32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTB32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitb32"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTB32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTB32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTB32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTB32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTB32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTB32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTL32(
*,
include_rescaling,
include_top,
name="ViTL32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTL32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTL32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTL32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTL32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTL32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTL32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTL32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTL32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTH32(
*,
include_rescaling,
include_top,
name="ViTH32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTH32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTH32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTH32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTH32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTH32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTH32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTH32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTH32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
setattr(ViTTiny16, "__doc__", BASE_DOCSTRING.format(name="ViTTiny16"))
setattr(ViTS16, "__doc__", BASE_DOCSTRING.format(name="ViTS16"))
setattr(ViTB16, "__doc__", BASE_DOCSTRING.format(name="ViTB16"))
setattr(ViTL16, "__doc__", BASE_DOCSTRING.format(name="ViTL16"))
setattr(ViTH16, "__doc__", BASE_DOCSTRING.format(name="ViTH16"))
setattr(ViTTiny32, "__doc__", BASE_DOCSTRING.format(name="ViTTiny32"))
setattr(ViTS32, "__doc__", BASE_DOCSTRING.format(name="ViTS32"))
setattr(ViTB32, "__doc__", BASE_DOCSTRING.format(name="ViTB32"))
setattr(ViTL32, "__doc__", BASE_DOCSTRING.format(name="ViTL32"))
setattr(ViTH32, "__doc__", BASE_DOCSTRING.format(name="ViTH32"))
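
# Illustrative smoke test for the constructors above. This is a hedged
# sketch rather than library code: the resolution and class count are
# arbitrary assumptions (pretrained "imagenet" weights would additionally
# require a (224, 224, 3) input shape, as noted in the docstring).
if __name__ == "__main__":
    model = ViTTiny16(
        include_rescaling=True,
        include_top=True,
        num_classes=10,
        input_shape=(224, 224, 3),
    )
    images = tf.random.uniform((1, 224, 224, 3), maxval=255.0)
    print(model(images).shape)  # expected: (1, 10)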
| keras-cv/keras_cv/models/legacy/vit.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/vit.py",
"repo_id": "keras-cv",
"token_count": 11951
} | 27 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone_presets import (
backbone_presets,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_conv_bn,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_csp_block,
)
from keras_cv.utils.python_utils import classproperty
def apply_spatial_pyramid_pooling_fast(
inputs, pool_size=5, activation="swish", name="spp_fast"
):
channel_axis = -1
input_channels = inputs.shape[channel_axis]
hidden_channels = int(input_channels // 2)
x = apply_conv_bn(
inputs,
hidden_channels,
kernel_size=1,
activation=activation,
name=f"{name}_pre",
)
pool_1 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool1"
)(x)
pool_2 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool2"
)(pool_1)
pool_3 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool3"
)(pool_2)
out = ops.concatenate([x, pool_1, pool_2, pool_3], axis=channel_axis)
out = apply_conv_bn(
out,
input_channels,
kernel_size=1,
activation=activation,
name=f"{name}_output",
)
return out
@keras_cv_export("keras_cv.models.YOLOV8Backbone")
class YOLOV8Backbone(Backbone):
"""Implements the YOLOV8 backbone for object detection.
This backbone is a variant of the `CSPDarkNetBackbone` architecture.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_channels: A list of ints, the number of channels for each dark
level in the model.
stackwise_depth: A list of ints, the depth for each dark level in the
model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
activation: String. The activation functions to use in the backbone to
use in the CSPDarkNet blocks. Defaults to "swish".
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Returns:
A `keras.Model` instance.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone_coco"
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.YOLOV8Backbone(
stackwise_channels=[128, 256, 512, 1024],
stackwise_depth=[3, 9, 9, 3],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
stackwise_channels,
stackwise_depth,
include_rescaling,
activation="swish",
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
""" Stem """
stem_width = stackwise_channels[0]
x = apply_conv_bn(
x,
stem_width // 2,
kernel_size=3,
strides=2,
activation=activation,
name="stem_1",
)
x = apply_conv_bn(
x,
stem_width,
kernel_size=3,
strides=2,
activation=activation,
name="stem_2",
)
""" blocks """
pyramid_level_inputs = {"P1": utils.get_tensor_input_name(x)}
for stack_id, (channel, depth) in enumerate(
zip(stackwise_channels, stackwise_depth)
):
stack_name = f"stack{stack_id + 1}"
if stack_id >= 1:
x = apply_conv_bn(
x,
channel,
kernel_size=3,
strides=2,
activation=activation,
name=f"{stack_name}_downsample",
)
x = apply_csp_block(
x,
depth=depth,
expansion=0.5,
activation=activation,
name=f"{stack_name}_c2f",
)
if stack_id == len(stackwise_depth) - 1:
x = apply_spatial_pyramid_pooling_fast(
x,
pool_size=5,
activation=activation,
name=f"{stack_name}_spp_fast",
)
pyramid_level_inputs[f"P{stack_id + 2}"] = (
utils.get_tensor_input_name(x)
)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_channels = stackwise_channels
self.stackwise_depth = stackwise_depth
self.include_rescaling = include_rescaling
self.activation = activation
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"stackwise_channels": self.stackwise_channels,
"stackwise_depth": self.stackwise_depth,
"activation": self.activation,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
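
# Illustrative feature-extraction sketch (hedged, not part of the library):
# the channel/depth configuration below is an arbitrary assumption rather
# than an official preset. It shows how `pyramid_level_inputs` maps pyramid
# levels to the names of the layers that produce them, which is how detection
# heads typically consume this backbone.
if __name__ == "__main__":
    import numpy as np

    backbone = YOLOV8Backbone(
        stackwise_channels=[32, 64, 128, 256],
        stackwise_depth=[1, 2, 2, 1],
        include_rescaling=True,
    )
    extractor = keras.Model(
        inputs=backbone.inputs,
        outputs={
            level: backbone.get_layer(name).output
            for level, name in backbone.pyramid_level_inputs.items()
        },
    )
    features = extractor(np.zeros((1, 128, 128, 3), dtype="float32"))
    for level, feature_map in features.items():
        print(level, feature_map.shape)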
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py",
"repo_id": "keras-cv",
"token_count": 3298
} | 28 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv.models.object_detection.yolox.layers import YoloXPAFPN
from keras_cv.tests.test_case import TestCase
class YoloXPAFPNTest(TestCase):
def test_num_parameters(self):
input1 = keras.Input((80, 80, 256))
input2 = keras.Input((40, 40, 512))
input3 = keras.Input((20, 20, 1024))
output = YoloXPAFPN()({3: input1, 4: input2, 5: input3})
model = keras.models.Model(
inputs=[input1, input2, input3], outputs=output
)
keras_params = sum(
[keras.backend.count_params(p) for p in model.trainable_weights]
)
# taken from original implementation
original_params = 19523072
self.assertEqual(keras_params, original_params)
def test_output_shape(self):
inputs = {
3: tf.random.uniform((3, 80, 80, 256)),
4: tf.random.uniform((3, 40, 40, 512)),
5: tf.random.uniform((3, 20, 20, 1024)),
}
output1, output2, output3 = YoloXPAFPN()(inputs)
self.assertEqual(output1.shape, [3, 80, 80, 256])
self.assertEqual(output2.shape, [3, 40, 40, 512])
self.assertEqual(output3.shape, [3, 20, 20, 1024])
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn_test.py",
"repo_id": "keras-cv",
"token_count": 732
} | 29 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.layers import preprocessing
from keras_cv.training import ContrastiveTrainer
class SimCLRTrainer(ContrastiveTrainer):
"""Creates a SimCLRTrainer.
References:
- [SimCLR paper](https://arxiv.org/pdf/2002.05709)
Args:
encoder: a `keras.Model` to be pre-trained. In most cases, this encoder
should not include a top dense layer.
augmenter: a SimCLRAugmenter layer to randomly augment input
images for contrastive learning
projection_width: the width of the two-layer dense model used for
projection in the SimCLR paper
"""
def __init__(self, encoder, augmenter, projection_width=128, **kwargs):
super().__init__(
encoder=encoder,
augmenter=augmenter,
projector=keras.Sequential(
[
layers.Dense(projection_width, activation="relu"),
layers.Dense(projection_width),
layers.BatchNormalization(),
],
name="projector",
),
**kwargs,
)
class SimCLRAugmenter(keras.Sequential):
def __init__(
self,
value_range,
height=128,
width=128,
crop_area_factor=(0.08, 1.0),
aspect_ratio_factor=(3 / 4, 4 / 3),
grayscale_rate=0.2,
color_jitter_rate=0.8,
brightness_factor=0.2,
contrast_factor=0.8,
saturation_factor=(0.3, 0.7),
hue_factor=0.2,
**kwargs,
):
        super().__init__(
[
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomCropAndResize(
target_size=(height, width),
crop_area_factor=crop_area_factor,
aspect_ratio_factor=aspect_ratio_factor,
),
preprocessing.RandomApply(
preprocessing.Grayscale(output_channels=3),
rate=grayscale_rate,
),
preprocessing.RandomApply(
preprocessing.RandomColorJitter(
value_range=value_range,
brightness_factor=brightness_factor,
contrast_factor=contrast_factor,
saturation_factor=saturation_factor,
hue_factor=hue_factor,
),
rate=color_jitter_rate,
),
],
**kwargs,
)
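
# Minimal wiring sketch for the two classes above (hedged; the encoder
# architecture, image size, and projection width are arbitrary assumptions,
# not recommendations). Compiling and fitting then follow the
# `ContrastiveTrainer` API.
if __name__ == "__main__":
    encoder = keras.Sequential(
        [
            layers.Input((96, 96, 3)),
            layers.Conv2D(32, 3, activation="relu"),
            layers.GlobalAveragePooling2D(),
        ],
        name="encoder",
    )
    trainer = SimCLRTrainer(
        encoder=encoder,
        augmenter=SimCLRAugmenter(value_range=(0, 255), height=96, width=96),
        projection_width=64,
    )
    encoder.summary()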
| keras-cv/keras_cv/training/contrastive/simclr_trainer.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/simclr_trainer.py",
"repo_id": "keras-cv",
"token_count": 1539
} | 30 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
def to_numpy(x):
if x is None:
return None
if isinstance(x, tf.RaggedTensor):
x = x.to_tensor(-1)
x = ops.convert_to_numpy(x)
# Important for consistency when working with visualization utilities
return np.ascontiguousarray(x)
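
# Minimal illustration (hedged, not exercised by the library itself): ragged
# inputs, e.g. variable-length bounding-box data, are densified with a -1
# padding value before conversion.
if __name__ == "__main__":
    ragged = tf.ragged.constant([[1.0, 2.0], [3.0]])
    print(to_numpy(ragged))
    # Expected output:
    # [[ 1.  2.]
    #  [ 3. -1.]]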
| keras-cv/keras_cv/utils/to_numpy.py/0 | {
"file_path": "keras-cv/keras_cv/utils/to_numpy.py",
"repo_id": "keras-cv",
"token_count": 287
} | 31 |
{
"name": "Keras-cv",
"build": {
"dockerfile": "Dockerfile",
"args": {
"VERSION": "2.11.0"
// Uncomment this if GPU support is required
// "VERSION": "2.11.0-gpu",
}
},
"customizations": {
"vscode": {
"settings": {
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.linting.pylintEnabled": false,
"python.testing.pytestEnabled": true,
"editor.formatOnSave": true,
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"editor.rulers": [
80
]
},
"extensions": [
"ms-python.python",
"ms-python.isort",
"ms-python.flake8",
"ms-python.black-formatter",
"ms-vscode.cpptools",
"xaver.clang-format"
]
}
},
"features": {
"ghcr.io/devcontainers/features/github-cli:1": {}
},
// TODO: Improve to allow dynamic runArgs, see microsoft/vscode-remote-release#3972
// Uncomment this if GPU support is required
// "runArgs": [
// "--gpus=all"
// ],
"onCreateCommand": "locale-gen \"en_US.UTF-8\"",
// Optional: install pre-commit hooks
// "postCreateCommand": "git config core.hooksPath .github/.githooks"
"postCreateCommand": "sh /setup.sh"
} | keras-cv/.devcontainer/devcontainer.json/0 | {
"file_path": "keras-cv/.devcontainer/devcontainer.json",
"repo_id": "keras-cv",
"token_count": 571
} | 0 |
build_file: "keras-cv/.kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
env_vars: {
key: "KERAS2"
value: "1"
}
# Set timeout to 60 mins from default 180 mins
timeout_mins: 60 | keras-cv/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg/0 | {
"file_path": "keras-cv/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg",
"repo_id": "keras-cv",
"token_count": 116
} | 1 |
import math
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
import keras_cv
from keras_cv.metrics import coco
def produce_random_data(
include_confidence=False, num_images=128, num_classes=20
):
"""Generates a fake list of bounding boxes for use in this test.
Returns:
a tensor list of size [128, 25, 5/6]. This represents 128 images, 25
bboxes and 5/6 dimensions to represent each bbox depending on if
confidence is set.
"""
images = []
for _ in range(num_images):
num_boxes = math.floor(25 * random.uniform(0, 1))
classes_in_image = np.floor(np.random.rand(num_boxes, 1) * num_classes)
bboxes = np.random.rand(num_boxes, 4)
boxes = np.concatenate([bboxes, classes_in_image], axis=-1)
if include_confidence:
confidence = np.random.rand(num_boxes, 1)
boxes = np.concatenate([boxes, confidence], axis=-1)
images.append(
keras_cv.utils.bounding_box.xywh_to_corners(
tf.constant(boxes, dtype=tf.float32)
)
)
images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
return tf.stack(images, axis=0)
y_true = produce_random_data()
y_pred = produce_random_data(include_confidence=True)
class_ids = list(range(20))
bucket_values = [500, 1000, 2000, 3500, 5000, 7500, 10000]
update_state_runtimes = []
result_runtimes = []
end_to_end_runtimes = []
for buckets in bucket_values:
metric = coco._COCOMeanAveragePrecision(class_ids, num_buckets=buckets)
# warm up
metric.update_state(y_true, y_pred)
metric.result()
start = time.time()
metric.update_state(y_true, y_pred)
update_state_done = time.time()
r = metric.result()
end = time.time()
update_state_runtimes.append(update_state_done - start)
result_runtimes.append(end - update_state_done)
end_to_end_runtimes.append(end - start)
print("end_to_end_runtimes", end_to_end_runtimes)
data = pd.DataFrame(
{
"bucket_values": bucket_values,
"update_state_runtimes": update_state_runtimes,
"result_runtimes": result_runtimes,
"end_to_end_runtimes": end_to_end_runtimes,
}
)
sns.lineplot(data=data, x="bucket_values", y="update_state_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("update_state() runtime (seconds)")
plt.title("Runtime of update_state()")
plt.show()
sns.lineplot(data=data, x="bucket_values", y="result_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("result() runtime (seconds)")
plt.title("Runtime of result()")
plt.show()
sns.lineplot(data=data, x="bucket_values", y="end_to_end_runtimes")
plt.xlabel("Number of Confidence Buckets")
plt.ylabel("End to end runtime (seconds)")
plt.title("Runtimes of update_state() followed by result()")
plt.show()
| keras-cv/benchmarks/metrics/coco/mean_average_precision_bucket_performance.py/0 | {
"file_path": "keras-cv/benchmarks/metrics/coco/mean_average_precision_bucket_performance.py",
"repo_id": "keras-cv",
"token_count": 1209
} | 2 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import RandomSaturation
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldRandomSaturation(BaseImageAugmentationLayer):
"""Randomly adjusts the saturation on given images.
This layer will randomly increase/reduce the saturation for the input RGB
images. At inference time, the output will be identical to the input.
Call the layer with `training=True` to adjust the saturation of the input.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image saturation is impacted. `factor=0.5` makes this layer perform
a no-op operation. `factor=0.0` makes the image to be fully
grayscale. `factor=1.0` makes the image to be fully saturated.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
"""
def __init__(self, factor, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
min_value=0.0,
max_value=1.0,
)
self.seed = seed
def get_random_transformation(self, **kwargs):
return self.factor()
def augment_image(self, image, transformation=None, **kwargs):
# Convert the factor range from [0, 1] to [0, +inf]. Note that the
# tf.image.adjust_saturation is trying to apply the following math
# formula `output_saturation = input_saturation * factor`. We use the
# following method to the do the mapping.
# `y = x / (1 - x)`.
# This will ensure:
# y = +inf when x = 1 (full saturation)
# y = 1 when x = 0.5 (no augmentation)
# y = 0 when x = 0 (full gray scale)
        # Convert the transformation to a tensor in case it is a float. When
        # the transformation is 1.0, the division below would raise a
        # divide-by-zero error for a plain float, but it is handled correctly
        # (yielding `inf`) when the value is a tensor.
transformation = tf.convert_to_tensor(transformation)
adjust_factor = transformation / (1 - transformation)
return tf.image.adjust_saturation(
image, saturation_factor=adjust_factor
)
def augment_bounding_boxes(
self, bounding_boxes, transformation=None, **kwargs
):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
x_train.shape
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [RandomSaturation, OldRandomSaturation]
aug_args = {"factor": (0.5)}
for aug in aug_candidates:
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
| keras-cv/benchmarks/vectorized_random_saturation.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_saturation.py",
"repo_id": "keras-cv",
"token_count": 2335
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import demo_utils
import tensorflow as tf
from keras_cv import layers as cv_layers
def _default_anchor_generator(bounding_box_format):
strides = [50]
sizes = [100.0]
scales = [1.0]
aspect_ratios = [1.0]
return cv_layers.AnchorGenerator(
bounding_box_format=bounding_box_format,
anchor_sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=True,
)
generator = _default_anchor_generator(bounding_box_format="xywh")
def pair_with_anchor_boxes(inputs):
images = inputs["images"]
anchor_boxes = generator(images[0])
anchor_boxes = anchor_boxes[0]
anchor_boxes = tf.expand_dims(anchor_boxes, axis=0)
anchor_boxes = tf.tile(anchor_boxes, [tf.shape(images)[0], 1, 1])
inputs["bounding_boxes"] = anchor_boxes
return inputs
if __name__ == "__main__":
dataset = demo_utils.load_voc_dataset(bounding_box_format="xywh")
result = dataset.map(
pair_with_anchor_boxes, num_parallel_calls=tf.data.AUTOTUNE
)
demo_utils.visualize_data(result, bounding_box_format="xywh")
| keras-cv/examples/layers/object_detection/anchor_generator_configuration.py/0 | {
"file_path": "keras-cv/examples/layers/object_detection/anchor_generator_configuration.py",
"repo_id": "keras-cv",
"token_count": 627
} | 4 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
def resize(image, label, img_size=(224, 224), num_classes=10):
image = tf.image.resize(image, img_size)
label = tf.one_hot(label, num_classes)
return {"images": image, "labels": label}
def load_oxford_dataset(
name="oxford_flowers102",
batch_size=64,
img_size=(224, 224),
as_supervised=True,
):
# Load dataset.
data, ds_info = tfds.load(name, as_supervised=as_supervised, with_info=True)
train_ds = data["train"]
num_classes = ds_info.features["label"].num_classes
# Get tf dataset.
train_ds = train_ds.map(
lambda x, y: resize(x, y, img_size=img_size, num_classes=num_classes)
).batch(batch_size)
return train_ds
def visualize_dataset(ds):
outputs = next(iter(ds.take(1)))
images = outputs["images"]
plt.figure(figsize=(8, 8))
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.axis("off")
plt.show()
def gallery_show(images):
images = images.astype(int)
for i in range(9):
image = images[i]
plt.subplot(3, 3, i + 1)
plt.imshow(image.astype("uint8"))
plt.axis("off")
plt.show()
def load_elephant_tensor(output_size=(300, 300)):
elephants = keras.utils.get_file(
"african_elephant.jpg", "https://i.imgur.com/Bvro0YD.png"
)
elephants = keras.utils.load_img(elephants, target_size=output_size)
elephants = keras.utils.img_to_array(elephants)
many_elephants = tf.repeat(tf.expand_dims(elephants, axis=0), 9, axis=0)
return many_elephants
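
# Minimal smoke test for the helpers above (hedged; it assumes network access
# to fetch the sample image). The demo scripts import these helpers rather
# than running this block directly.
if __name__ == "__main__":
    elephants = load_elephant_tensor(output_size=(300, 300))
    gallery_show(elephants.numpy())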
| keras-cv/examples/layers/preprocessing/classification/demo_utils.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 909
} | 5 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""resize_demo.py shows how to use the Resizing preprocessing layer.
Uses the Oxford-IIIT Pet dataset. In this script the pet images
are loaded, passed through the preprocessing layers, and finally
shown using matplotlib.
"""
import tensorflow as tf
import tensorflow_datasets as tfds
from keras_cv.layers import preprocessing
from keras_cv.visualization import plot_image_gallery
def load_data():
ds = tfds.load(
name="oxford_iiit_pet",
split="train",
)
return ds.map(
lambda inputs: {
"images": tf.cast(inputs["image"], dtype=tf.float32),
"segmentation_masks": inputs["segmentation_mask"] - 1,
}
)
def map_fn_for_visualization(inputs):
masks = tf.cast(inputs["segmentation_masks"], dtype=tf.float32) / 2.0
images = tf.expand_dims(inputs["images"], axis=0)
masks = tf.expand_dims(masks, axis=0)
masks = tf.repeat(masks, repeats=3, axis=-1)
image_masks = tf.concat([images, masks], axis=2)
return image_masks[0]
def main():
ds = load_data()
resize = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
bounding_box_format=None,
)
resize_crop = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=True,
pad_to_aspect_ratio=False,
bounding_box_format=None,
)
resize_pad = preprocessing.Resizing(
256,
256,
interpolation="bilinear",
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=True,
bounding_box_format=None,
)
ds_resize = ds.map(resize, num_parallel_calls=tf.data.AUTOTUNE)
ds_crop = ds.map(resize_crop, num_parallel_calls=tf.data.AUTOTUNE)
ds_pad = ds.map(resize_pad, num_parallel_calls=tf.data.AUTOTUNE)
ds_resize = ds_resize.map(map_fn_for_visualization).batch(8)
ds_crop = ds_crop.map(map_fn_for_visualization).batch(8)
ds_pad = ds_pad.map(map_fn_for_visualization).batch(8)
plot_image_gallery(
next(iter(ds_resize.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize.png",
)
plot_image_gallery(
next(iter(ds_crop.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize_crop.png",
)
plot_image_gallery(
next(iter(ds_pad.take(1))),
value_range=(0, 1),
scale=3,
rows=2,
cols=4,
path="resize_pad.png",
)
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/segmentation/resize_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/segmentation/resize_demo.py",
"repo_id": "keras-cv",
"token_count": 1440
} | 6 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
from keras_cv.backend import keras
try:
import namex
except ImportError:
namex = None
def maybe_register_serializable(symbol, package):
if isinstance(symbol, types.FunctionType) or hasattr(symbol, "get_config"):
keras.saving.register_keras_serializable(package=package)(symbol)
if namex:
class keras_cv_export(namex.export):
def __init__(self, path, package="keras_cv"):
super().__init__(package="keras_cv", path=path)
self.package = package
def __call__(self, symbol):
maybe_register_serializable(symbol, self.package)
return super().__call__(symbol)
else:
class keras_cv_export:
def __init__(self, path, package="keras_cv"):
self.package = package
def __call__(self, symbol):
maybe_register_serializable(symbol, self.package)
return symbol
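
# Minimal illustration of the decorator (hedged; `MyToyLayer` is a
# hypothetical symbol, not part of KerasCV): the class is exported under the
# given path and, because it defines `get_config`, it is also registered for
# Keras serialization under the "keras_cv" package.
if __name__ == "__main__":
    @keras_cv_export("keras_cv.layers.MyToyLayer")
    class MyToyLayer(keras.layers.Layer):
        def get_config(self):
            return super().get_config()

    print(keras.saving.get_registered_name(MyToyLayer))  # "keras_cv>MyToyLayer"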
| keras-cv/keras_cv/api_export.py/0 | {
"file_path": "keras-cv/keras_cv/api_export.py",
"repo_id": "keras-cv",
"token_count": 539
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iou functions."""
import numpy as np
from keras_cv.bounding_box import iou as iou_lib
from keras_cv.tests.test_case import TestCase
class IoUTest(TestCase):
def test_compute_single_iou(self):
bb1 = np.array([[100, 101, 200, 201]])
bb1_off_by_1 = np.array([[101, 102, 201, 202]])
# area of bb1 and bb1_off_by_1 are each 10000.
# intersection area is 99*99=9801
# iou=9801/(2*10000 - 9801)=0.96097656633
self.assertAllClose(
iou_lib.compute_iou(bb1, bb1_off_by_1, "yxyx")[0], [0.96097656633]
)
def test_compute_iou(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
dtype=np.float32,
)
sample_y_true = np.array([bb1, top_left_bounding_box, far_away_box])
sample_y_pred = np.array(
[bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_batched_compute_iou(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_batched_boxes1_unbatched_boxes2(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[bb1_off_by_1_pred, top_left_bounding_box, another_far_away_pred],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
def test_unbatched_boxes1_batched_boxes2(self):
bb1 = [100, 101, 200, 201]
bb1_off_by_1_pred = [101, 102, 201, 202]
iou_bb1_bb1_off = 0.96097656633
top_left_bounding_box = [0, 2, 1, 3]
far_away_box = [1300, 1400, 1500, 1401]
another_far_away_pred = [1000, 1400, 1200, 1401]
# Rows represent predictions, columns ground truths
expected_result = np.array(
[
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
[[iou_bb1_bb1_off, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
],
)
sample_y_true = np.array(
[
[bb1, top_left_bounding_box, far_away_box],
],
)
sample_y_pred = np.array(
[
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
[
bb1_off_by_1_pred,
top_left_bounding_box,
another_far_away_pred,
],
],
)
result = iou_lib.compute_iou(sample_y_true, sample_y_pred, "yxyx")
self.assertAllClose(expected_result, result)
| keras-cv/keras_cv/bounding_box/iou_test.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/iou_test.py",
"repo_id": "keras-cv",
"token_count": 3167
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras.callbacks import Callback
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.utils import assert_waymo_open_dataset_installed
try:
from waymo_open_dataset import label_pb2
from waymo_open_dataset.metrics.python.wod_detection_evaluator import (
WODDetectionEvaluator,
)
from waymo_open_dataset.protos import breakdown_pb2
from waymo_open_dataset.protos import metrics_pb2
except ImportError:
WODDetectionEvaluator = None
@keras_cv_export("keras_cv.callbacks.WaymoEvaluationCallback")
class WaymoEvaluationCallback(Callback):
def __init__(self, validation_data, config=None, **kwargs):
"""Creates a callback to evaluate Waymo Open Dataset (WOD) metrics on a
validation dataset.
Args:
validation_data: a tf.data.Dataset containing validation data.
                Entries should have the form `(point_clouds,
                {"bounding_boxes": bounding_boxes})`. Padded bounding boxes
                should have a class of -1
to be correctly filtered out.
config: an optional `metrics_pb2.Config` object from WOD to specify
what metrics should be evaluated.
"""
assert_waymo_open_dataset_installed(
"keras_cv.callbacks.WaymoEvaluationCallback()"
)
self.val_data = validation_data
self.evaluator = WODDetectionEvaluator(
config=config or self._get_default_config()
)
super().__init__(**kwargs)
def _get_default_config(self):
"""Returns the default Config proto for detection."""
config = metrics_pb2.Config()
config.breakdown_generator_ids.append(
breakdown_pb2.Breakdown.OBJECT_TYPE
)
difficulty = config.difficulties.add()
difficulty.levels.append(label_pb2.Label.LEVEL_1)
difficulty.levels.append(label_pb2.Label.LEVEL_2)
config.matcher_type = metrics_pb2.MatcherProto.TYPE_HUNGARIAN
config.iou_thresholds.append(0.0) # Unknown
config.iou_thresholds.append(0.7) # Vehicle
config.iou_thresholds.append(0.5) # Pedestrian
config.iou_thresholds.append(0.5) # Sign
config.iou_thresholds.append(0.5) # Cyclist
config.box_type = label_pb2.Label.Box.TYPE_3D
for i in range(100):
config.score_cutoffs.append(i * 0.01)
config.score_cutoffs.append(1.0)
return config
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
gt, preds = self._eval_dataset(self.val_data)
self.evaluator.update_state(gt, preds)
metrics = self.evaluator.result()
metrics_dict = {
"average_precision_vehicle_l1": metrics.average_precision[0],
"average_precision_vehicle_l2": metrics.average_precision[1],
"average_precision_ped_l1": metrics.average_precision[2],
"average_precision_ped_l2": metrics.average_precision[3],
}
logs.update(metrics_dict)
def _eval_dataset(self, dataset):
def point_clouds_only(point_clouds, target):
return point_clouds
def boxes_only(point_clouds, target):
return target["3d_boxes"]
model_outputs = self.model.predict(dataset.map(point_clouds_only))[
"3d_boxes"
]
def flatten_target(boxes):
return tf.concat(
[
boxes["boxes"],
tf.expand_dims(
tf.cast(boxes["classes"], tf.float32), axis=-1
),
tf.expand_dims(
tf.cast(boxes["difficulty"], tf.float32), axis=-1
),
],
axis=-1,
)
gt_boxes = tf.concat(
[flatten_target(x) for x in iter(dataset.map(boxes_only))], axis=0
)
boxes_per_gt_frame = gt_boxes.shape[1]
num_frames = gt_boxes.shape[0]
gt_boxes = tf.reshape(gt_boxes, (num_frames * boxes_per_gt_frame, 9))
# Remove padded boxes
gt_real_boxes = tf.concat(
[x["mask"] for x in iter(dataset.map(boxes_only))], axis=0
)
gt_real_boxes = tf.reshape(
gt_real_boxes, (num_frames * boxes_per_gt_frame)
)
gt_boxes = tf.boolean_mask(gt_boxes, gt_real_boxes)
frame_ids = tf.cast(tf.linspace(1, num_frames, num_frames), tf.int64)
ground_truth = {
"ground_truth_frame_id": tf.boolean_mask(
tf.repeat(frame_ids, boxes_per_gt_frame), gt_real_boxes
),
"ground_truth_bbox": gt_boxes[:, : CENTER_XYZ_DXDYDZ_PHI.PHI + 1],
"ground_truth_type": tf.cast(
gt_boxes[:, CENTER_XYZ_DXDYDZ_PHI.CLASS], tf.uint8
),
"ground_truth_difficulty": tf.cast(
gt_boxes[:, CENTER_XYZ_DXDYDZ_PHI.CLASS + 1], tf.uint8
),
}
boxes_per_pred_frame = model_outputs["boxes"].shape[1]
total_predicted_boxes = boxes_per_pred_frame * num_frames
predicted_boxes = tf.reshape(
model_outputs["boxes"], (total_predicted_boxes, 7)
)
predicted_classes = tf.cast(
tf.reshape(model_outputs["classes"], (total_predicted_boxes, 1)),
tf.uint8,
)
prediction_scores = tf.reshape(
model_outputs["confidence"], (total_predicted_boxes, 1)
)
# Remove boxes that come from padding
pred_real_boxes = tf.squeeze(prediction_scores > 0)
predicted_boxes = tf.boolean_mask(predicted_boxes, pred_real_boxes)
predicted_classes = tf.boolean_mask(predicted_classes, pred_real_boxes)
prediction_scores = tf.boolean_mask(prediction_scores, pred_real_boxes)
predictions = {
"prediction_frame_id": tf.boolean_mask(
tf.repeat(frame_ids, boxes_per_pred_frame), pred_real_boxes
),
"prediction_bbox": predicted_boxes,
"prediction_type": tf.squeeze(predicted_classes),
"prediction_score": tf.squeeze(prediction_scores),
"prediction_overlap_nlz": tf.cast(
tf.zeros(predicted_boxes.shape[0]), tf.bool
),
}
return ground_truth, predictions
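# Illustrative usage sketch only (the model and datasets below are
# hypothetical and must follow the format documented in `__init__`):
#
#     callback = WaymoEvaluationCallback(validation_data=val_ds)
#     model.fit(train_ds, epochs=10, callbacks=[callback])
#
# After each epoch, the WOD average-precision metrics computed by the
# evaluator are added to the training logs.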
| keras-cv/keras_cv/callbacks/waymo_evaluation_callback.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/waymo_evaluation_callback.py",
"repo_id": "keras-cv",
"token_count": 3284
} | 9 |
/* Copyright 2023 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#define EIGEN_USE_THREADS
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
namespace kerascv {
class WithinAnyBoxOp : public OpKernel {
public:
explicit WithinAnyBoxOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& points = ctx->input(0);
const Tensor& boxes = ctx->input(1);
const int num_points = points.dim_size(0);
const int num_boxes = boxes.dim_size(0);
Tensor* within_any_box = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("within_any_box", TensorShape({num_points}),
&within_any_box));
auto within_any_box_t = within_any_box->flat<bool>();
for (auto i = 0; i < num_points; ++i) within_any_box_t(i) = false;
std::vector<box::Upright3DBox> boxes_vec = box::ParseBoxesFromTensor(boxes);
std::vector<box::Vertex> points_vec = box::ParseVerticesFromTensor(points);
auto within_fn = [&boxes_vec, &points_vec, &within_any_box_t](int64_t begin,
int64_t end) {
for (int64_t idx = begin; idx < end; ++idx) {
box::Upright3DBox& box = boxes_vec[idx];
for (uint64_t p_idx = 0; p_idx < points_vec.size(); ++p_idx) {
if (within_any_box_t(p_idx)) {
continue;
}
auto point = points_vec[p_idx];
if (box.WithinBox3D(point)) {
within_any_box_t(p_idx) = true;
}
}
}
};
const CPUDevice& device = ctx->eigen_device<CPUDevice>();
const Eigen::TensorOpCost cost(num_points, num_boxes, 3);
device.parallelFor(num_boxes, cost, within_fn);
}
};
REGISTER_KERNEL_BUILDER(Name("KcvWithinAnyBox").Device(DEVICE_CPU),
WithinAnyBoxOp);
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/kernels/within_any_box_op.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/kernels/within_any_box_op.cc",
"repo_id": "keras-cv",
"token_count": 1136
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.FeaturePyramid")
class FeaturePyramid(keras.layers.Layer):
"""Implements a Feature Pyramid Network.
This implements the paper:
Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
and Serge Belongie. Feature Pyramid Networks for Object Detection.
(https://arxiv.org/pdf/1612.03144)
Feature Pyramid Networks (FPNs) are basic components that are added to an
existing feature extractor (CNN) to combine features at different scales.
For the basic FPN, the inputs are features `Ci` from different levels of a
CNN, which is usually the last block for each level, where the feature is
scaled from the image by a factor of `1/2^i`.
There is an output associated with each level in the basic FPN. The output
Pi at level `i` (corresponding to Ci) is given by performing a merge
operation on the outputs of:
1) a lateral operation on Ci (usually a conv2D layer with kernel = 1 and
strides = 1)
    2) a top-down upsampling operation from Pi+1 (except for the topmost level)
The final output of each level will also have a conv2D operation
(typically with kernel = 3 and strides = 1).
    The inputs to the layer should be a dict with int keys that match the
    pyramid_levels, e.g. for `pyramid_levels` = [2,3,4,5], the expected input
    dict should be `{2:c2, 3:c3, 4:c4, 5:c5}`.
    The output of the layer will have the same structure as the inputs: a dict
    with int keys and a value for each level.
Args:
min_level: a python int for the lowest level of the pyramid for
feature extraction.
max_level: a python int for the highest level of the pyramid for
feature extraction.
num_channels: an integer representing the number of channels for the FPN
operations, defaults to 256.
lateral_layers: a python dict with int keys that matches to each of the
pyramid level. The values of the dict should be `keras.Layer`, which
will be called with feature activation outputs from backbone at each
level. Defaults to None, and a `keras.Conv2D` layer with kernel 1x1
will be created for each pyramid level.
output_layers: a python dict with int keys that matches to each of the
pyramid level. The values of the dict should be `keras.Layer`, which
will be called with feature inputs and merged result from upstream
levels. Defaults to None, and a `keras.Conv2D` layer with kernel 3x3
will be created for each pyramid level.
Sample Usage:
```python
inp = keras.layers.Input((384, 384, 3))
backbone = keras.applications.EfficientNetB0(
input_tensor=inp,
include_top=False
)
layer_names = ['block2b_add',
'block3b_add',
'block5c_add',
'top_activation'
]
backbone_outputs = {}
for i, layer_name in enumerate(layer_names):
backbone_outputs[i+2] = backbone.get_layer(layer_name).output
# output_dict is a dict with 2, 3, 4, 5 as keys
output_dict = keras_cv.layers.FeaturePyramid(
min_level=2,
max_level=5
)(backbone_outputs)
```
"""
def __init__(
self,
min_level,
max_level,
num_channels=256,
lateral_layers=None,
output_layers=None,
**kwargs,
):
super().__init__(**kwargs)
self.min_level = min_level
self.max_level = max_level
self.pyramid_levels = list(range(min_level, max_level + 1))
self.num_channels = num_channels
# required for successful serialization
self.lateral_layers_passed = lateral_layers
self.output_layers_passed = output_layers
if not lateral_layers:
# populate self.lateral_ops with default FPN Conv2D 1X1 layers
self.lateral_layers = {}
for i in self.pyramid_levels:
self.lateral_layers[i] = keras.layers.Conv2D(
self.num_channels,
kernel_size=1,
strides=1,
padding="same",
name=f"lateral_P{i}",
)
else:
self._validate_user_layers(lateral_layers, "lateral_layers")
self.lateral_layers = lateral_layers
# Output conv2d layers.
if not output_layers:
self.output_layers = {}
for i in self.pyramid_levels:
self.output_layers[i] = keras.layers.Conv2D(
self.num_channels,
kernel_size=3,
strides=1,
padding="same",
name=f"output_P{i}",
)
else:
self._validate_user_layers(output_layers, "output_layers")
self.output_layers = output_layers
# the same upsampling layer is used for all levels
self.top_down_op = keras.layers.UpSampling2D(size=2)
# the same merge layer is used for all levels
self.merge_op = keras.layers.Add()
def _validate_user_layers(self, user_input, param_name):
if (
not isinstance(user_input, dict)
or sorted(user_input.keys()) != self.pyramid_levels
):
raise ValueError(
f"Expect {param_name} to be a dict with keys as "
f"{self.pyramid_levels}, got {user_input}"
)
def call(self, features):
        # Note that this assertion might not be true for all the subclasses.
        # It is possible to have an FPN with higher levels than the highest
        # level of the backbone outputs.
if (
not isinstance(features, dict)
or sorted(features.keys()) != self.pyramid_levels
):
raise ValueError(
"FeaturePyramid expects input features to be a dict with int "
"keys that match the values provided in pyramid_levels. "
f"Expect feature keys: {self.pyramid_levels}, got: {features}"
)
return self.build_feature_pyramid(features)
def build_feature_pyramid(self, input_features):
# To illustrate the connection/topology, the basic flow for a FPN with
# level 3, 4, 5 is like below:
#
# input_l5 -> conv2d_1x1_l5 ----V---> conv2d_3x3_l5 -> output_l5
# V
# upsample2d
# V
# input_l4 -> conv2d_1x1_l4 -> Add -> conv2d_3x3_l4 -> output_l4
# V
# upsample2d
# V
# input_l3 -> conv2d_1x1_l3 -> Add -> conv2d_3x3_l3 -> output_l3
output_features = {}
reversed_levels = list(sorted(input_features.keys(), reverse=True))
top_level = reversed_levels[0]
for level in reversed_levels:
output = self.lateral_layers[level](input_features[level])
if level < top_level:
# for the top most output, it doesn't need to merge with any
# upper stream outputs
upstream_output = self.top_down_op(output_features[level + 1])
output = self.merge_op([output, upstream_output])
output_features[level] = output
# Post apply the output layers so that we don't leak them to the down
# stream level
for level in reversed_levels:
output_features[level] = self.output_layers[level](
output_features[level]
)
return output_features
def get_config(self):
config = {
"min_level": self.min_level,
"max_level": self.max_level,
"num_channels": self.num_channels,
"lateral_layers": self.lateral_layers_passed,
"output_layers": self.output_layers_passed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
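def _example_custom_feature_pyramid():
    """Illustrative sketch only, not part of the public API.

    Shows how user-provided per-level lateral and output layers (keyed by
    pyramid level) can be passed instead of the default 1x1/3x3 convolutions.
    The layer names below are arbitrary.
    """
    levels = range(2, 6)
    lateral = {
        i: keras.layers.Conv2D(256, 1, padding="same", name=f"my_lateral_P{i}")
        for i in levels
    }
    output = {
        i: keras.layers.Conv2D(256, 3, padding="same", name=f"my_output_P{i}")
        for i in levels
    }
    return FeaturePyramid(
        min_level=2,
        max_level=5,
        lateral_layers=lateral,
        output_layers=output,
    )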
| keras-cv/keras_cv/layers/feature_pyramid.py/0 | {
"file_path": "keras-cv/keras_cv/layers/feature_pyramid.py",
"repo_id": "keras-cv",
"token_count": 3879
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import Mapping
from typing import Optional
from typing import Tuple
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
def _feature_bilinear_interpolation(
features: tf.Tensor, kernel_y: tf.Tensor, kernel_x: tf.Tensor
) -> tf.Tensor:
"""
Feature bilinear interpolation.
The RoIAlign feature f can be computed by bilinear interpolation
of four neighboring feature points f0, f1, f2, and f3.
f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
[f10, f11]]
f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
kernel_y = [hy, ly]
kernel_x = [hx, lx]
Args:
features: The features are in shape of [batch_size, num_boxes,
output_size * 2, output_size * 2, num_filters].
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
features_shape = tf.shape(features)
batch_size, num_boxes, output_size, num_filters = (
features_shape[0],
features_shape[1],
features_shape[2],
features_shape[4],
)
output_size = output_size // 2
kernel_y = tf.reshape(kernel_y, [batch_size, num_boxes, output_size * 2, 1])
kernel_x = tf.reshape(kernel_x, [batch_size, num_boxes, 1, output_size * 2])
# Use implicit broadcast to generate the interpolation kernel. The
# multiplier `4` is for avg pooling.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
features *= tf.cast(
tf.expand_dims(interpolation_kernel, axis=-1), dtype=features.dtype
)
features = tf.reshape(
features,
[batch_size * num_boxes, output_size * 2, output_size * 2, num_filters],
)
features = tf.nn.avg_pool(features, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")
features = tf.reshape(
features, [batch_size, num_boxes, output_size, output_size, num_filters]
)
return features
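# Worked example of the interpolation weights assembled above (values are
# illustrative): for a sample point at fractional offsets (ly, lx) =
# (0.25, 0.75) within its 2x2 neighborhood, hy = 0.75 and hx = 0.25, so
# w00 = hy*hx = 0.1875, w01 = hy*lx = 0.5625, w10 = ly*hx = 0.0625 and
# w11 = ly*lx = 0.1875. The weights sum to 1 before the factor-of-4 rescaling
# that compensates for the subsequent 2x2 average pooling.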
def _compute_grid_positions(
boxes: tf.Tensor,
boundaries: tf.Tensor,
output_size: int,
sample_offset: float,
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
"""
Computes the grid position w.r.t. the corresponding feature map.
Args:
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map
size.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
sample_offset: a float number in [0, 1] indicates the subpixel sample
offset from grid point.
Returns:
kernel_y: Tensor of size [batch_size, boxes, output_size, 2, 1].
kernel_x: Tensor of size [batch_size, boxes, output_size, 2, 1].
box_grid_y0y1: Tensor of size [batch_size, boxes, output_size, 2]
box_grid_x0x1: Tensor of size [batch_size, boxes, output_size, 2]
"""
boxes_shape = tf.shape(boxes)
batch_size, num_boxes = boxes_shape[0], boxes_shape[1]
if batch_size is None:
batch_size = tf.shape(boxes)[0]
box_grid_x = []
box_grid_y = []
for i in range(output_size):
box_grid_x.append(
boxes[:, :, 1] + (i + sample_offset) * boxes[:, :, 3] / output_size
)
box_grid_y.append(
boxes[:, :, 0] + (i + sample_offset) * boxes[:, :, 2] / output_size
)
box_grid_x = tf.stack(box_grid_x, axis=2)
box_grid_y = tf.stack(box_grid_y, axis=2)
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
box_grid_x0 = tf.maximum(tf.cast(0.0, dtype=box_grid_x0.dtype), box_grid_x0)
box_grid_y0 = tf.maximum(tf.cast(0.0, dtype=box_grid_y0.dtype), box_grid_y0)
box_grid_x0 = tf.minimum(
box_grid_x0, tf.expand_dims(boundaries[:, :, 1], -1)
)
box_grid_x1 = tf.minimum(
box_grid_x0 + 1, tf.expand_dims(boundaries[:, :, 1], -1)
)
box_grid_y0 = tf.minimum(
box_grid_y0, tf.expand_dims(boundaries[:, :, 0], -1)
)
box_grid_y1 = tf.minimum(
box_grid_y0 + 1, tf.expand_dims(boundaries[:, :, 0], -1)
)
box_gridx0x1 = tf.stack([box_grid_x0, box_grid_x1], axis=-1)
box_gridy0y1 = tf.stack([box_grid_y0, box_grid_y1], axis=-1)
# The RoIAlign feature f can be computed by bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3.
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_y = tf.reshape(
tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size, 2, 1]
)
kernel_x = tf.reshape(
tf.stack([hx, lx], axis=3), [batch_size, num_boxes, output_size, 2, 1]
)
return kernel_y, kernel_x, box_gridy0y1, box_gridx0x1
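# Worked example of the sampling grid above (values are illustrative): with
# output_size=2 and sample_offset=0.5, a box starting at y=10.0 with height
# 4.0 samples rows at 10.0 + 0.5 * 4 / 2 = 11.0 and 10.0 + 1.5 * 4 / 2 = 13.0;
# each sampled coordinate is then split into its floor / floor + 1 pair for
# the bilinear kernel.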
def multilevel_crop_and_resize(
features: Dict[str, tf.Tensor],
boxes: tf.Tensor,
output_size: int = 7,
sample_offset: float = 0.5,
) -> tf.Tensor:
"""
Crop and resize on multilevel feature pyramid.
Generate the (output_size, output_size) set of pixels for each input box
by first locating the box into the correct feature level, and then cropping
and resizing it using the corresponding feature map of that level.
Args:
features: A dictionary with key as pyramid level and value as features.
The pyramid level keys need to be represented by strings like so:
"P2", "P3", "P4", and so on.
The features are in shape of [batch_size, height_l, width_l,
num_filters].
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
output_size: A scalar to indicate the output crop size.
sample_offset: a float number in [0, 1] indicates the subpixel sample
offset from grid point.
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
with tf.name_scope("multilevel_crop_and_resize"):
levels_str = list(features.keys())
# Levels are represented by strings with a prefix "P" to represent
# pyramid levels. The integer level can be obtained by looking at
# the value that follows the "P".
levels = [int(level_str[1:]) for level_str in levels_str]
min_level = min(levels)
max_level = max(levels)
features_shape = tf.shape(features[f"P{min_level}"])
batch_size, max_feature_height, max_feature_width, num_filters = (
features_shape[0],
features_shape[1],
features_shape[2],
features_shape[3],
)
num_boxes = tf.shape(boxes)[1]
# Stack feature pyramid into a features_all of shape
# [batch_size, levels, height, width, num_filters].
features_all = []
feature_heights = []
feature_widths = []
for level in range(min_level, max_level + 1):
shape = features[f"P{level}"].get_shape().as_list()
feature_heights.append(shape[1])
feature_widths.append(shape[2])
# Concat tensor of [batch_size, height_l * width_l, num_filters] for
# each level.
features_all.append(
tf.reshape(features[f"P{level}"], [batch_size, -1, num_filters])
)
features_r2 = tf.reshape(tf.concat(features_all, 1), [-1, num_filters])
# Calculate height_l * width_l for each level.
level_dim_sizes = [
feature_widths[i] * feature_heights[i]
for i in range(len(feature_widths))
]
# level_dim_offsets is accumulated sum of level_dim_size.
level_dim_offsets = [0]
for i in range(len(feature_widths) - 1):
level_dim_offsets.append(level_dim_offsets[i] + level_dim_sizes[i])
batch_dim_size = level_dim_offsets[-1] + level_dim_sizes[-1]
level_dim_offsets = tf.constant(level_dim_offsets, tf.int32)
height_dim_sizes = tf.constant(feature_widths, tf.int32)
# Assigns boxes to the right level.
box_width = boxes[:, :, 3] - boxes[:, :, 1]
box_height = boxes[:, :, 2] - boxes[:, :, 0]
areas_sqrt = tf.sqrt(
tf.cast(box_height, tf.float32) * tf.cast(box_width, tf.float32)
)
        # Following the FPN paper, sqrt(area) is normalized by 224 (the
        # canonical ImageNet pre-training size) before assigning levels.
levels = tf.cast(
tf.math.floordiv(
tf.math.log(tf.math.divide_no_nan(areas_sqrt, 224.0)),
tf.math.log(2.0),
)
+ 4.0,
dtype=tf.int32,
)
# Maps levels between [min_level, max_level].
levels = tf.minimum(max_level, tf.maximum(levels, min_level))
# Projects box location and sizes to corresponding feature levels.
scale_to_level = tf.cast(
tf.pow(tf.constant(2.0), tf.cast(levels, tf.float32)),
dtype=boxes.dtype,
)
boxes /= tf.expand_dims(scale_to_level, axis=2)
box_width /= scale_to_level
box_height /= scale_to_level
boxes = tf.concat(
[
boxes[:, :, 0:2],
tf.expand_dims(box_height, -1),
tf.expand_dims(box_width, -1),
],
axis=-1,
)
# Maps levels to [0, max_level-min_level].
levels -= min_level
level_strides = tf.pow([[2.0]], tf.cast(levels, tf.float32))
boundary = tf.cast(
tf.concat(
[
tf.expand_dims(
[[tf.cast(max_feature_height, tf.float32)]]
/ level_strides
- 1,
axis=-1,
),
tf.expand_dims(
[[tf.cast(max_feature_width, tf.float32)]]
/ level_strides
- 1,
axis=-1,
),
],
axis=-1,
),
boxes.dtype,
)
# Compute grid positions.
(
kernel_y,
kernel_x,
box_gridy0y1,
box_gridx0x1,
) = _compute_grid_positions(boxes, boundary, output_size, sample_offset)
x_indices = tf.cast(
tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32,
)
y_indices = tf.cast(
tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2]),
dtype=tf.int32,
)
batch_size_offset = tf.tile(
tf.reshape(
tf.range(batch_size) * batch_dim_size, [batch_size, 1, 1, 1]
),
[1, num_boxes, output_size * 2, output_size * 2],
)
# Get level offset for each box. Each box belongs to one level.
levels_offset = tf.tile(
tf.reshape(
tf.gather(level_dim_offsets, levels),
[batch_size, num_boxes, 1, 1],
),
[1, 1, output_size * 2, output_size * 2],
)
y_indices_offset = tf.tile(
tf.reshape(
y_indices
* tf.expand_dims(tf.gather(height_dim_sizes, levels), -1),
[batch_size, num_boxes, output_size * 2, 1],
),
[1, 1, 1, output_size * 2],
)
x_indices_offset = tf.tile(
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]),
[1, 1, output_size * 2, 1],
)
indices = tf.reshape(
batch_size_offset
+ levels_offset
+ y_indices_offset
+ x_indices_offset,
[-1],
)
# TODO(tanzhenyu): replace tf.gather with tf.gather_nd and try to get
# similar performance.
features_per_box = tf.reshape(
tf.gather(features_r2, indices),
[
batch_size,
num_boxes,
output_size * 2,
output_size * 2,
num_filters,
],
)
# Bilinear interpolation.
features_per_box = _feature_bilinear_interpolation(
features_per_box, kernel_y, kernel_x
)
return features_per_box
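# Worked example of the level assignment used above: a box of roughly
# 112 x 112 pixels on the image has sqrt(area) = 112, so
#     level = floor(log2(112 / 224)) + 4 = floor(-1.0) + 4 = 3,
# which is then clipped to the [min_level, max_level] range of the pyramid.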
# TODO(tanzhenyu): Remove this implementation once roi_pool has better
# performance as this is mostly a duplicate of
# https://github.com/tensorflow/models/blob/master/official/legacy/detection/ops/spatial_transform_ops.py#L324
@keras.utils.register_keras_serializable(package="keras_cv")
class _ROIAligner(keras.layers.Layer):
"""Performs ROIAlign for the second stage processing."""
def __init__(
self,
bounding_box_format,
target_size=7,
sample_offset: float = 0.5,
**kwargs,
):
"""
Generates ROI Aligner.
Args:
bounding_box_format: the input format for boxes.
          target_size: An `int` of the output size of the cropped features.
sample_offset: A `float` in [0, 1] of the subpixel sample offset.
**kwargs: Additional keyword arguments passed to Layer.
"""
assert_tf_keras("keras_cv.layers._ROIAligner")
self._config_dict = {
"bounding_box_format": bounding_box_format,
"crop_size": target_size,
"sample_offset": sample_offset,
}
super().__init__(**kwargs)
def call(
self,
features: Mapping[str, tf.Tensor],
boxes: tf.Tensor,
training: Optional[bool] = None,
):
"""
Args:
features: A dictionary with key as pyramid level and value as
features. The features are in shape of
[batch_size, height_l, width_l, num_filters].
            boxes: A 3-D `tf.Tensor` of shape [batch_size, num_boxes, 4]. Each
                row represents a box with [y1, x1, y2, x2] in un-normalized
                coordinates.
training: A `bool` of whether it is in training mode.
Returns:
A 5-D `tf.Tensor` representing feature crop of shape
[batch_size, num_boxes, crop_size, crop_size, num_filters].
"""
boxes = bounding_box.convert_format(
boxes,
source=self._config_dict["bounding_box_format"],
target="yxyx",
)
roi_features = multilevel_crop_and_resize(
features,
boxes,
output_size=self._config_dict["crop_size"],
sample_offset=self._config_dict["sample_offset"],
)
return roi_features
def get_config(self):
return self._config_dict
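# Illustrative usage sketch only (shapes are hypothetical): given a feature
# pyramid dict keyed "P2"..."P5" and a batch of proposal boxes in the
# configured `bounding_box_format`, the aligner returns fixed-size crops.
#
#     roi_aligner = _ROIAligner(bounding_box_format="yxyx", target_size=7)
#     roi_features = roi_aligner(features, rois)
#     # -> shape [batch_size, num_rois, 7, 7, num_filters]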
| keras-cv/keras_cv/layers/object_detection/roi_align.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/roi_align.py",
"repo_id": "keras-cv",
"token_count": 7772
} | 12 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.object_detection_3d import voxel_utils
from keras_cv.tests.test_case import TestCase
class PadOrTrimToTest(TestCase):
"""Tests for pad_or_trim_to, branched from
https://github.com/tensorflow/lingvo/blob/master/lingvo/core/py_utils_test.py.
"""
def test_2D_constant_shape_pad(self):
x = tf.random.normal(shape=(3, 3), seed=123456)
shape = [4, 6]
padded_x_right = voxel_utils._pad_or_trim_to(x, shape, pad_val=0)
padded_x_left = voxel_utils._pad_or_trim_to(
x, shape, pad_val=0, pad_after_contents=False
)
self.assertEqual(padded_x_right.shape.as_list(), [4, 6])
self.assertEqual(padded_x_left.shape.as_list(), [4, 6])
real_x_right, real_x_left = self.evaluate(
[padded_x_right, padded_x_left]
)
expected_x_right = [
[0.38615, 2.975221, -0.852826, 0.0, 0.0, 0.0],
[-0.571142, -0.432439, 0.413158, 0.0, 0.0, 0.0],
[0.255314, -0.985647, 1.461641, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
self.assertAllClose(expected_x_right, real_x_right)
expected_x_left = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.38615, 2.975221, -0.852826],
[0.0, 0.0, 0.0, -0.571142, -0.432439, 0.413158],
[0.0, 0.0, 0.0, 0.255314, -0.985647, 1.461641],
]
self.assertAllClose(expected_x_left, real_x_left)
def test_2D_constant_shape_trim(self):
x = tf.random.normal(shape=(3, 3), seed=123456)
shape = [1, 3]
trimmed_x_right = voxel_utils._pad_or_trim_to(x, shape, pad_val=0)
trimmed_x_left = voxel_utils._pad_or_trim_to(
x, shape, pad_val=0, pad_after_contents=False
)
self.assertEqual(trimmed_x_right.shape.as_list(), [1, 3])
self.assertEqual(trimmed_x_left.shape.as_list(), [1, 3])
real_x_right, real_x_left = self.evaluate(
[trimmed_x_right, trimmed_x_left]
)
expected_x_right = [[0.38615, 2.975221, -0.852826]]
self.assertAllClose(expected_x_right, real_x_right)
expected_x_left = [[0.255314, -0.985647, 1.461641]]
self.assertAllClose(expected_x_left, real_x_left)
| keras-cv/keras_cv/layers/object_detection_3d/voxel_utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxel_utils_test.py",
"repo_id": "keras-cv",
"token_count": 1406
} | 13 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Equalization")
class Equalization(VectorizedBaseImageAugmentationLayer):
"""Equalization performs histogram equalization on a channel-wise basis.
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`.
bins: Integer indicating the number of bins to use in histogram
            equalization. Should be in the range [1, 256].
Usage:
```python
equalize = Equalization()
(images, labels), _ = keras.datasets.cifar10.load_data()
    # Note that images are a uint8 Tensor with values in the range [0, 255]
images = equalize(images)
```
Call arguments:
images: Tensor of pixels in range [0, 255], in RGB format. Can be
of type float or int. Should be in NHWC format.
"""
def __init__(self, value_range, bins=256, **kwargs):
super().__init__(**kwargs)
self.bins = bins
self.value_range = value_range
def equalize_channel(self, images, channel_index):
"""equalize_channel performs histogram equalization on a single channel.
Args:
            images: a batch of int Tensors with pixels in range [0, 255], RGB
                format, with channels last
channel_index: channel to equalize
"""
is_single_image = tf.rank(images) == 4 and tf.shape(images)[0] == 1
images = images[..., channel_index]
# Compute the histogram of the image channel.
# If the input is not a batch of images, directly using
# tf.histogram_fixed_width is much faster than using tf.vectorized_map
if is_single_image:
histogram = tf.histogram_fixed_width(
images, [0, 255], nbins=self.bins
)
histogram = tf.expand_dims(histogram, axis=0)
else:
partial_hist = partial(
tf.histogram_fixed_width, value_range=[0, 255], nbins=self.bins
)
histogram = tf.vectorized_map(
partial_hist, images, fallback_to_while_loop=True, warn=True
)
        # For the purposes of computing the step, only the non-zero histogram
        # bins matter. Zeroes are replaced by a big number while calculating
        # the min to keep the shape constant across input sizes for
        # compatibility with vectorized_map
big_number = 1410065408
histogram_without_zeroes = tf.where(
tf.equal(histogram, 0),
big_number,
histogram,
)
step = (
tf.reduce_sum(histogram, axis=-1)
- tf.reduce_min(histogram_without_zeroes, axis=-1)
) // (self.bins - 1)
def build_mapping(histogram, step):
            batch_size = tf.shape(histogram)[0]
# Replace where step is 0 with 1 to avoid division by 0.
# This doesn't change the result, because where step==0 the
# original image is returned
_step = tf.where(
tf.equal(step, 0),
1,
step,
)
_step = tf.expand_dims(_step, -1)
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lookup_table = (
tf.cumsum(histogram, axis=-1) + (_step // 2)
) // _step
# Shift lookup_table, prepending with 0.
lookup_table = tf.concat(
                [tf.tile([[0]], [batch_size, 1]), lookup_table[..., :-1]],
axis=1,
)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lookup_table, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lookup table from the full histogram and step and then index from it.
# The lookup table is built for all images,
# regardless of the corresponding value of step.
result = tf.where(
tf.reshape(tf.equal(step, 0), (-1, 1, 1)),
images,
tf.gather(
build_mapping(histogram, step), images, batch_dims=1, axis=1
),
)
return result
def augment_images(self, images, transformations=None, **kwargs):
images = preprocessing.transform_value_range(
images, self.value_range, (0, 255), dtype=self.compute_dtype
)
images = tf.cast(images, tf.int32)
images = tf.map_fn(
lambda channel: self.equalize_channel(images, channel),
tf.range(tf.shape(images)[-1]),
)
images = tf.transpose(images, [1, 2, 3, 0])
images = tf.cast(images, self.compute_dtype)
images = preprocessing.transform_value_range(
images, (0, 255), self.value_range, dtype=self.compute_dtype
)
return images
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def get_config(self):
config = super().get_config()
config.update({"bins": self.bins, "value_range": self.value_range})
return config
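# Worked example of the `step` computation in `equalize_channel` above
# (values are illustrative): for a constant-valued 8x8 channel the histogram
# has a single non-zero bin with count 64, so
#     step = (sum(histogram) - min_nonzero(histogram)) // (bins - 1)
#          = (64 - 64) // 255 = 0,
# and the original channel is returned unchanged via the final tf.where.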
| keras-cv/keras_cv/layers/preprocessing/equalization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/equalization.py",
"repo_id": "keras-cv",
"token_count": 2893
} | 14 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
CONSISTENT_OUTPUT_TEST_CONFIGURATIONS = [
("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", layers.ChannelShuffle, {}),
("Equalization", layers.Equalization, {"value_range": (0, 255)}),
("Grayscale", layers.Grayscale, {}),
("GridMask", layers.GridMask, {}),
(
"Posterization",
layers.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
(
"RandomColorDegeneration",
layers.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomCutout",
layers.RandomCutout,
{"height_factor": 0.2, "width_factor": 0.2},
),
(
"RandomHue",
layers.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomChannelShift",
layers.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
layers.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
layers.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomGaussianBlur",
layers.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
("RandomFlip", layers.RandomFlip, {"mode": "horizontal"}),
("RandomJpegQuality", layers.RandomJpegQuality, {"factor": (75, 100)}),
("RandomRotation", layers.RandomRotation, {"factor": 0.5}),
("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
layers.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomShear", layers.RandomShear, {"x_factor": 0.3, "y_factor": 0.3}),
(
"RandomTranslation",
layers.RandomTranslation,
{"height_factor": 0.5, "width_factor": 0.5},
),
(
"RandomZoom",
layers.RandomZoom,
{"height_factor": 0.2, "width_factor": 0.5},
),
("Solarization", layers.Solarization, {"value_range": (0, 255)}),
(
"RandomBrightness",
layers.RandomBrightness,
{"factor": (1, 1), "value_range": (0, 1)},
),
]
DENSE_OUTPUT_TEST_CONFIGURATIONS = [
(
"JitteredResize",
layers.JitteredResize,
{
"target_size": (224, 224),
"scale_factor": (0.8, 1.25),
"bounding_box_format": "xywh",
},
),
(
"RandomCrop",
layers.RandomCrop,
{"height": 2, "width": 2},
),
(
"RandomCropAndResize",
layers.RandomCropAndResize,
{
"target_size": (224, 224),
"crop_area_factor": (0.8, 1.0),
"aspect_ratio_factor": (3 / 4, 4 / 3),
},
),
(
"Resizing",
layers.Resizing,
{
"height": 224,
"width": 224,
},
),
]
RAGGED_OUTPUT_TEST_CONFIGURATIONS = [
("RandomAspectRatio", layers.RandomAspectRatio, {"factor": (0.9, 1.1)}),
]
class RaggedImageTest(TestCase):
@parameterized.named_parameters(*CONSISTENT_OUTPUT_TEST_CONFIGURATIONS)
def test_preserves_ragged_status(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = tf.ragged.stack(
[
np.ones((5, 5, 3)),
np.ones((8, 8, 3)),
]
)
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.RaggedTensor))
@parameterized.named_parameters(*DENSE_OUTPUT_TEST_CONFIGURATIONS)
def test_converts_ragged_to_dense(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = tf.ragged.stack(
[
np.ones((5, 5, 3)),
np.ones((8, 8, 3)),
]
)
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.Tensor))
@parameterized.named_parameters(*RAGGED_OUTPUT_TEST_CONFIGURATIONS)
def test_dense_to_ragged(self, layer_cls, init_args):
layer = layer_cls(**init_args)
inputs = np.ones((8, 512, 512, 3))
outputs = layer(inputs)
self.assertTrue(isinstance(outputs, tf.RaggedTensor))
| keras-cv/keras_cv/layers/preprocessing/ragged_image_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/ragged_image_test.py",
"repo_id": "keras-cv",
"token_count": 2445
} | 15 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomColorDegenerationTest(TestCase):
def test_random_color_degeneration_base_case(self):
img_shape = (50, 50, 3)
xs = tf.stack(
[2 * np.ones(img_shape), np.ones(img_shape)],
axis=0,
)
layer = preprocessing.RandomColorDegeneration(0.0)
ys = layer(xs)
self.assertEqual(xs.shape, ys.shape)
def test_color_degeneration_full_factor(self):
img_shape = (50, 50, 1)
r = np.ones(img_shape)
g = 2 * np.ones(img_shape)
b = 3 * np.ones(img_shape)
xs = tf.concat([r, g, b], axis=-1)
layer = preprocessing.RandomColorDegeneration(factor=(1, 1))
ys = ops.convert_to_numpy(layer(xs))
# Color degeneration uses standard luma conversion for RGB->Grayscale.
# The formula for luma is result= 0.2989*r + 0.5870*g + 0.1140*b
luma_result = 0.2989 + 2 * 0.5870 + 3 * 0.1140
self.assertAllClose(ys, np.ones_like(ys) * luma_result)
def test_color_degeneration_70p_factor(self):
img_shape = (50, 50, 1)
r = np.ones(img_shape)
g = 2 * np.ones(img_shape)
b = 3 * np.ones(img_shape)
xs = tf.concat([r, g, b], axis=-1)
layer = preprocessing.RandomColorDegeneration(factor=(0.7, 0.7))
ys = ops.convert_to_numpy(layer(xs))
# Color degeneration uses standard luma conversion for RGB->Grayscale.
# The formula for luma is result= 0.2989*r + 0.5870*g + 0.1140*b
luma_result = 0.2989 + 2 * 0.5870 + 3 * 0.1140
# with factor=0.7, luma_result should be blended at a 70% rate with the
# original
r_result = luma_result * 0.7 + 1 * 0.3
g_result = luma_result * 0.7 + 2 * 0.3
b_result = luma_result * 0.7 + 3 * 0.3
r = ys[..., 0]
g = ys[..., 1]
b = ys[..., 2]
self.assertAllClose(r, np.ones_like(r) * r_result)
self.assertAllClose(g, np.ones_like(g) * g_result)
self.assertAllClose(b, np.ones_like(b) * b_result)
| keras-cv/keras_cv/layers/preprocessing/random_color_degeneration_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_degeneration_test.py",
"repo_id": "keras-cv",
"token_count": 1212
} | 16 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv import core
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomHueTest(TestCase):
def test_preserves_output_shape(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertEqual(image.shape, output.shape)
self.assertNotAllClose(image, output)
def test_adjust_no_op(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjust_full_opposite_hue(self):
image_shape = (4, 8, 8, 3)
image = np.random.uniform(size=image_shape) * 255.0
layer = preprocessing.RandomHue(factor=(1.0, 1.0), value_range=(0, 255))
output = ops.convert_to_numpy(layer(image))
channel_max = np.max(output, axis=-1)
channel_min = np.min(output, axis=-1)
        # Make sure the max and min channel values are the same between input
        # and output. In the meantime, the other channels swap among each
        # other.
self.assertAllClose(
channel_max,
np.max(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
self.assertAllClose(
channel_min,
np.min(image, axis=-1),
atol=1e-5,
rtol=1e-5,
)
@parameterized.named_parameters(
("025", 0.25), ("05", 0.5), ("075", 0.75), ("100", 1.0)
)
def test_adjusts_all_values_for_factor(self, factor):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = np.random.uniform(size=image_shape) * 100.0
layer = preprocessing.RandomHue(
factor=(factor, factor), value_range=(0, 255)
)
output = layer(image)
self.assertNotAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_adjustment_for_non_rgb_value_range(self):
image_shape = (4, 8, 8, 3)
# Value range (0, 100)
image = np.random.uniform(size=image_shape) * 100.0
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = (np.random.uniform(size=image_shape) * 255.0).astype(np.uint8)
layer = preprocessing.RandomHue(factor=(0.0, 0.0), value_range=(0, 255))
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomHue(factor=(0.3, 0.8), value_range=(0, 255))
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
self.assertEqual(config["value_range"], (0, 255))
| keras-cv/keras_cv/layers/preprocessing/random_hue_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_hue_test.py",
"repo_id": "keras-cv",
"token_count": 1825
} | 17 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
import keras_cv.layers as cv_layers
from keras_cv.backend.config import keras_3
from keras_cv.tests.test_case import TestCase
class RepeatedAugmentationTest(TestCase):
@pytest.mark.skipif(keras_3(), reason="Disabled for Keras 3")
def test_output_shapes(self):
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.RandomFlip(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
self.assertEqual(outputs["images"].shape, (16, 512, 512, 3))
self.assertEqual(outputs["labels"].shape, (16,))
@pytest.mark.skipif(keras_3(), reason="disabling test for Keras 3")
def test_with_mix_up(self):
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.MixUp(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8, 10)),
}
outputs = repeated_augment(inputs)
self.assertEqual(outputs["images"].shape, (16, 512, 512, 3))
self.assertEqual(outputs["labels"].shape, (16, 10))
| keras-cv/keras_cv/layers/preprocessing/repeated_augmentation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/repeated_augmentation_test.py",
"repo_id": "keras-cv",
"token_count": 841
} | 18 |
Copyright (c) 2023 Waymo LLC. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
Additional IP Rights Grant (Patents)
"Works" means the code located at keras_cv/layers/preprocessing_3d/waymo
licensed from Waymo LLC ("Waymo") for inclusion in the KerasCV project at
github.com/keras-team/keras-cv. “Patents" means the pending U.S. Patent App.
No. 63/418,259 and any issued patents arising therefrom. Subject to the terms
and conditions of this license, Waymo hereby grants to you a limited worldwide,
non-exclusive, royalty-free, personal patent license to make, have made, use,
and import the Works, where such license applies only to those Patent claims
that are necessarily infringed by the Works executing the ”preprocessing_3d”
augmentation library on 3D perception tasks using the
“lidaraugment_keraspolicy.py” file. This grant does not include claims that
would be infringed by combining the Works with other works, utilizing the Works
on other tasks, or as a consequence of further modification of the Works. If
you or your agent or exclusive licensee institute or order or agree to the
institution of patent litigation or any other patent enforcement activity
against any entity (including a cross-claim or counterclaim in a lawsuit)
alleging that the Works or any activity using the Works to execute functions for
3D perception tasks constitutes direct or contributory patent infringement, or
inducement of patent infringement, then any patent rights granted to you under
this license for the Works shall terminate as of the date such litigation is
filed.
DISCLAIMER
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/LICENSE/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/LICENSE",
"repo_id": "keras-cv",
"token_count": 768
} | 19 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import group_points_by_boxes
from keras_cv.point_cloud import is_within_box3d
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
OBJECT_POINT_CLOUDS = base_augmentation_layer_3d.OBJECT_POINT_CLOUDS
OBJECT_BOUNDING_BOXES = base_augmentation_layer_3d.OBJECT_BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GroupPointsByBoundingBoxes")
class GroupPointsByBoundingBoxes(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which groups point clouds based on bounding boxes
during training.
This layer will group point clouds based on bounding boxes and generate
OBJECT_POINT_CLOUDS and OBJECT_BOUNDING_BOXES tensors.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
Output shape:
A dictionary of Tensors with the same shape as input Tensors and two
additional items for OBJECT_POINT_CLOUDS (shape [num of frames, num of
valid boxes, max num of points, num of point features]) and
OBJECT_BOUNDING_BOXES (shape [num of frames, num of valid boxes, num of
box features]).
Arguments:
      label_index: An optional int scalar that sets the target object index.
        Bounding boxes and corresponding point clouds with box class ==
        label_index will be saved as OBJECT_BOUNDING_BOXES and
        OBJECT_POINT_CLOUDS. If label_index is None, all valid bounding boxes
        (box class != 0) are used.
      min_points_per_bounding_boxes: An int scalar that sets the min number of
        points in a bounding box. If a bounding box contains fewer than
        min_points_per_bounding_boxes points, the bounding box is filtered out.
      max_points_per_bounding_boxes: An int scalar that sets the max number of
        points in a bounding box. All the object point clouds will be padded
        or trimmed to the same shape, where the number of points dimension is
        max_points_per_bounding_boxes.
"""
def __init__(
self,
label_index=None,
min_points_per_bounding_boxes=0,
max_points_per_bounding_boxes=2000,
**kwargs
):
super().__init__(**kwargs)
if label_index and label_index < 0:
raise ValueError("label_index must be >=0 or None.")
if min_points_per_bounding_boxes < 0:
raise ValueError("min_points_per_bounding_boxes must be >=0.")
if max_points_per_bounding_boxes < 0:
raise ValueError("max_points_per_bounding_boxes must be >=0.")
if min_points_per_bounding_boxes > max_points_per_bounding_boxes:
raise ValueError(
"max_paste_bounding_boxes must be >= "
"min_points_per_bounding_boxes."
)
self._label_index = label_index
self._min_points_per_bounding_boxes = min_points_per_bounding_boxes
self._max_points_per_bounding_boxes = max_points_per_bounding_boxes
self._auto_vectorize = False
def get_config(self):
return {
"label_index": self._label_index,
"min_points_per_bounding_boxes": self._min_points_per_bounding_boxes, # noqa: E501
"max_points_per_bounding_boxes": self._max_points_per_bounding_boxes, # noqa: E501
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, **kwargs
):
if self._label_index:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS]
== self._label_index
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
else:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0.0
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
points_in_bounding_boxes = is_within_box3d(
point_clouds[:, :, :3], object_bounding_boxes[:, :, :7]
)
# Filter bounding boxes using the current frame.
# [num_boxes]
min_points_filter = (
tf.reduce_sum(
tf.cast(points_in_bounding_boxes[0], dtype=tf.int32), axis=0
)
>= self._min_points_per_bounding_boxes
)
object_bounding_boxes = tf.boolean_mask(
object_bounding_boxes, min_points_filter, axis=1
)
points_in_bounding_boxes = tf.boolean_mask(
points_in_bounding_boxes, min_points_filter, axis=2
)
# [num of frames, num of boxes, num of points].
points_in_bounding_boxes = tf.transpose(
points_in_bounding_boxes, [0, 2, 1]
)
points_in_bounding_boxes = tf.cast(points_in_bounding_boxes, tf.int32)
sort_valid_index = tf.argsort(
points_in_bounding_boxes, axis=-1, direction="DESCENDING"
)
sort_valid_mask = tf.gather(
points_in_bounding_boxes, sort_valid_index, axis=2, batch_dims=2
)[:, :, : self._max_points_per_bounding_boxes]
# [num of frames, num of boxes, self._max_points_per_bounding_boxes, num
# of point features].
object_point_clouds = point_clouds[:, tf.newaxis, :, :]
num_valid_bounding_boxes = tf.shape(object_bounding_boxes)[1]
object_point_clouds = tf.tile(
object_point_clouds, [1, num_valid_bounding_boxes, 1, 1]
)
object_point_clouds = tf.gather(
object_point_clouds, sort_valid_index, axis=2, batch_dims=2
)[:, :, : self._max_points_per_bounding_boxes, :]
object_point_clouds = tf.where(
sort_valid_mask[:, :, :, tf.newaxis] > 0, object_point_clouds, 0.0
)
return (
object_point_clouds,
object_bounding_boxes,
)
def augment_point_clouds_bounding_boxes_v2(
self, point_clouds, bounding_boxes, **kwargs
):
if self._label_index:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS]
== self._label_index
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
else:
bounding_boxes_mask = (
bounding_boxes[0, :, CENTER_XYZ_DXDYDZ_PHI.CLASS] > 0.0
)
object_bounding_boxes = tf.boolean_mask(
bounding_boxes, bounding_boxes_mask, axis=1
)
# [frames, num_boxes, ragged_points]
points_in_bounding_boxes = group_points_by_boxes(
point_clouds[:, :, :3], object_bounding_boxes[:, :, :7]
)
# Filter bounding boxes using the current frame.
# [num_boxes]
min_points_filter = (
points_in_bounding_boxes.row_lengths(-1)
>= self._min_points_per_bounding_boxes
)
# [frames, num_valid_boxes, box_feature]
object_bounding_boxes = tf.ragged.boolean_mask(
object_bounding_boxes, min_points_filter
)
# [frames, num_valid_boxes, ragged_points]
points_in_bounding_boxes = tf.ragged.boolean_mask(
points_in_bounding_boxes, min_points_filter
)
# point_clouds: [frames, num_points, point_feature]
# object_point_clouds: [frames, num_valid_boxes, ragged_points,
# point_feature]
object_point_clouds = tf.gather(
point_clouds, points_in_bounding_boxes, axis=1, batch_dims=1
)
return (object_point_clouds, object_bounding_boxes)
def _augment(self, inputs):
result = inputs
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
)
(
object_point_clouds,
object_bounding_boxes,
) = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result.update(
{
OBJECT_POINT_CLOUDS: object_point_clouds,
OBJECT_BOUNDING_BOXES: object_bounding_boxes,
}
)
return result
def call(self, inputs):
# TODO(ianstenbit): Support the model input format.
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3:
return self._augment(inputs)
elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4:
batch = point_clouds.get_shape().as_list()[0]
object_point_clouds_list = []
object_bounding_boxes_list = []
for i in range(batch):
(
object_point_clouds,
object_bounding_boxes,
) = self.augment_point_clouds_bounding_boxes(
inputs[POINT_CLOUDS][i], inputs[BOUNDING_BOXES][i]
)
object_point_clouds_list += [object_point_clouds]
object_bounding_boxes_list += [object_bounding_boxes]
# object_point_clouds shape [num of frames, num of valid boxes,
# max num of points, num of point features].
inputs[OBJECT_POINT_CLOUDS] = tf.concat(
object_point_clouds_list, axis=-3
)
# object_bounding_boxes shape [num of frames, num of valid
# boxes, num of box features].
inputs[OBJECT_BOUNDING_BOXES] = tf.concat(
object_bounding_boxes_list, axis=-2
)
return inputs
else:
raise ValueError(
"Point clouds augmentation layers are expecting inputs "
"point clouds and bounding boxes to be rank 3D (Frame, "
"Point, Feature) or 4D (Batch, Frame, Point, Feature) "
"tensors. Got shape: {} and {}".format(
point_clouds.shape, bounding_boxes.shape
)
)
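# ---------------------------------------------------------------------------
# Editor's note: the sketch below is an illustrative addition, not part of the
# original Waymo-licensed file. It shows one plausible way to drive this layer
# with random data; the shapes follow the class docstring above, and the
# concrete values (frames, point counts, box sizes) are arbitrary assumptions.
if __name__ == "__main__":
    layer = GroupPointsByBoundingBoxes(
        label_index=None,
        min_points_per_bounding_boxes=1,
        max_points_per_bounding_boxes=16,
    )
    # [frames, points, features]; the first five features are x, y, z, class,
    # range as described in the docstring.
    point_clouds = tf.random.uniform([2, 100, 5])
    # One valid box per frame in CENTER_XYZ_DXDYDZ_PHI + class format,
    # centered at the origin so it contains the random points above.
    bounding_boxes = tf.constant(
        [[[0.0, 0.0, 0.0, 4.0, 4.0, 4.0, 0.0, 1.0]]] * 2
    )
    outputs = layer(
        {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
    )
    # Expected: roughly (2, 1, 16, 5) and (2, 1, 8) for this toy input.
    print(outputs[OBJECT_POINT_CLOUDS].shape)
    print(outputs[OBJECT_BOUNDING_BOXES].shape)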
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/group_points_by_bounding_boxes.py",
"repo_id": "keras-cv",
"token_count": 5266
} | 20 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.regularization.stochastic_depth import StochasticDepth
from keras_cv.tests.test_case import TestCase
class StochasticDepthTest(TestCase):
FEATURE_SHAPE = (1, 14, 14, 256)
def test_inputs_have_two_elements(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs, inputs]
with self.assertRaisesRegex(
ValueError,
"Input must be a list of length 2. " "Got input with length=3.",
):
StochasticDepth()(inputs)
def test_eval_mode(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs]
rate = 0.5
outputs = StochasticDepth(rate=rate)(inputs, training=False)
self.assertAllClose(inputs[0] * (1 + rate), outputs)
def test_training_mode(self):
inputs = tf.random.uniform(self.FEATURE_SHAPE, 0, 1)
inputs = [inputs, inputs]
rate = 0.5
outputs = StochasticDepth(rate=rate)(inputs, training=True)
outputs_sum = tf.math.reduce_sum(outputs)
inputs_sum = tf.math.reduce_sum(inputs[0])
self.assertIn(outputs_sum, [inputs_sum, inputs_sum * 2])
| keras-cv/keras_cv/layers/regularization/stochastic_depth_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/stochastic_depth_test.py",
"repo_id": "keras-cv",
"token_count": 688
} | 21 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.FocalLoss")
class FocalLoss(keras.losses.Loss):
"""Implements Focal loss
Focal loss is a modified cross-entropy designed to perform better with
class imbalance. For this reason, it's commonly used with object detectors.
Args:
alpha: a float value between 0 and 1 representing a weighting factor
used to deal with class imbalance. Positive classes and negative
classes have alpha and (1 - alpha) as their weighting factors
respectively. Defaults to 0.25.
gamma: a positive float value representing the tunable focusing
parameter, defaults to 2.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, `y_pred` is assumed to encode a probability distribution.
            Defaults to `False`.
label_smoothing: Float in `[0, 1]`. If higher than 0 then smooth the
labels by squeezing them towards `0.5`, i.e., using
`1. - 0.5 * label_smoothing` for the target class and
`0.5 * label_smoothing` for the non-target class.
References:
- [Focal Loss paper](https://arxiv.org/abs/1708.02002)
Standalone usage:
```python
y_true = np.random.uniform(size=[10], low=0, high=4)
y_pred = np.random.uniform(size=[10], low=0, high=4)
loss = FocalLoss()
loss(y_true, y_pred)
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=keras_cv.losses.FocalLoss())
```
"""
def __init__(
self,
alpha=0.25,
gamma=2,
from_logits=False,
label_smoothing=0,
**kwargs,
):
super().__init__(**kwargs)
self.alpha = float(alpha)
self.gamma = float(gamma)
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def _smooth_labels(self, y_true):
return (
y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
)
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if self.label_smoothing:
y_true = self._smooth_labels(y_true)
if self.from_logits:
y_pred = ops.sigmoid(y_pred)
cross_entropy = ops.binary_crossentropy(y_true, y_pred)
alpha = ops.where(
ops.equal(y_true, 1.0), self.alpha, (1.0 - self.alpha)
)
pt = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
loss = (
alpha
* ops.cast(ops.power(1.0 - pt, self.gamma), alpha.dtype)
* ops.cast(cross_entropy, alpha.dtype)
)
# In most losses you mean over the final axis to achieve a scalar
# Focal loss however is a special case in that it is meant to focus on
# a small number of hard examples in a batch. Most of the time this
# comes in the form of thousands of background class boxes and a few
# positive boxes.
# If you mean over the final axis you will get a number close to 0,
# which will encourage your model to exclusively predict background
# class boxes.
return ops.sum(loss, axis=-1)
def get_config(self):
config = super().get_config()
config.update(
{
"alpha": self.alpha,
"gamma": self.gamma,
"from_logits": self.from_logits,
"label_smoothing": self.label_smoothing,
}
)
return config
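# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original file. It is
# meant to show the "focusing" behaviour described in the class docstring:
# with the default gamma, a well-classified example (p ~ 0.9) contributes far
# less loss than a hard one (p ~ 0.1). The numbers are arbitrary assumptions.
if __name__ == "__main__":
    import numpy as np

    focal = FocalLoss(alpha=0.25, gamma=2.0)
    y_true = np.array([[1.0], [1.0]])
    y_pred = np.array([[0.9], [0.1]])
    # Per-example terms are roughly alpha * (1 - p)^gamma * -log(p):
    # ~0.0003 for p=0.9 versus ~0.47 for p=0.1.
    print(focal(y_true, y_pred))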
| keras-cv/keras_cv/losses/focal.py/0 | {
"file_path": "keras-cv/keras_cv/losses/focal.py",
"repo_id": "keras-cv",
"token_count": 1822
} | 22 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
try:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
except ImportError:
COCO = object
COCOeval = None
from keras_cv.utils.conditional_imports import assert_pycocotools_installed
METRIC_NAMES = [
"AP",
"AP50",
"AP75",
"APs",
"APm",
"APl",
"ARmax1",
"ARmax10",
"ARmax100",
"ARs",
"ARm",
"ARl",
]
class PyCOCOWrapper(COCO):
"""COCO wrapper class.
This class wraps COCO API object, which provides the following additional
functionalities:
1. Support string type image id.
2. Support loading the groundtruth dataset using the external annotation
dictionary.
3. Support loading the prediction results using the external annotation
dictionary.
"""
def __init__(self, gt_dataset=None):
"""Instantiates a COCO-style API object.
Args:
gt_dataset: the groundtruth eval dataset in COCO API format.
"""
assert_pycocotools_installed("PyCOCOWrapper")
COCO.__init__(self, annotation_file=None)
self._eval_type = "box"
if gt_dataset:
self.dataset = gt_dataset
self.createIndex()
def loadRes(self, predictions):
"""Loads result file and return a result api object.
Args:
predictions: a list of dictionary each representing an annotation in
COCO format. The required fields are `image_id`, `category_id`,
`score`, `bbox`, `segmentation`.
Returns:
res: result COCO api object.
Raises:
ValueError: if the set of image id from predictions is not the subset
of the set of image id of the groundtruth dataset.
"""
res = COCO()
res.dataset["images"] = copy.deepcopy(self.dataset["images"])
res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
image_ids = [ann["image_id"] for ann in predictions]
if set(image_ids) != (set(image_ids) & set(self.getImgIds())):
raise ValueError(
"Results do not correspond to the current dataset!"
)
for ann in predictions:
x1, x2, y1, y2 = [
ann["bbox"][0],
ann["bbox"][0] + ann["bbox"][2],
ann["bbox"][1],
ann["bbox"][1] + ann["bbox"][3],
]
ann["area"] = ann["bbox"][2] * ann["bbox"][3]
ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
res.dataset["annotations"] = copy.deepcopy(predictions)
res.createIndex()
return res
def _yxyx_to_xywh(boxes):
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
boxes_ymin = boxes[..., 0]
boxes_xmin = boxes[..., 1]
boxes_width = boxes[..., 3] - boxes[..., 1]
boxes_height = boxes[..., 2] - boxes[..., 0]
new_boxes = np.stack(
[boxes_xmin, boxes_ymin, boxes_width, boxes_height], axis=-1
)
return new_boxes
def _convert_predictions_to_coco_annotations(predictions):
coco_predictions = []
num_batches = len(predictions["source_id"])
for i in range(num_batches):
batch_size = predictions["source_id"][i].shape[0]
predictions["detection_boxes"][i] = predictions["detection_boxes"][
i
].copy()
for j in range(batch_size):
max_num_detections = predictions["num_detections"][i][j]
predictions["detection_boxes"][i][j] = _yxyx_to_xywh(
predictions["detection_boxes"][i][j]
)
for k in range(max_num_detections):
ann = {}
ann["image_id"] = predictions["source_id"][i][j]
ann["category_id"] = predictions["detection_classes"][i][j][k]
ann["bbox"] = predictions["detection_boxes"][i][j][k]
ann["score"] = predictions["detection_scores"][i][j][k]
coco_predictions.append(ann)
for i, ann in enumerate(coco_predictions):
ann["id"] = i + 1
return coco_predictions
def _convert_groundtruths_to_coco_dataset(groundtruths, label_map=None):
source_ids = np.concatenate(groundtruths["source_id"], axis=0)
gt_images = [{"id": i} for i in source_ids]
gt_annotations = []
num_batches = len(groundtruths["source_id"])
for i in range(num_batches):
max_num_instances = max(x.shape[0] for x in groundtruths["classes"][i])
batch_size = groundtruths["source_id"][i].shape[0]
for j in range(batch_size):
num_instances = groundtruths["num_detections"][i][j]
if num_instances > max_num_instances:
num_instances = max_num_instances
for k in range(int(num_instances)):
ann = {}
ann["image_id"] = groundtruths["source_id"][i][j]
ann["iscrowd"] = 0
ann["category_id"] = int(groundtruths["classes"][i][j][k])
boxes = groundtruths["boxes"][i]
ann["bbox"] = [
float(boxes[j][k][1]),
float(boxes[j][k][0]),
float(boxes[j][k][3] - boxes[j][k][1]),
float(boxes[j][k][2] - boxes[j][k][0]),
]
ann["area"] = float(
(boxes[j][k][3] - boxes[j][k][1])
* (boxes[j][k][2] - boxes[j][k][0])
)
gt_annotations.append(ann)
for i, ann in enumerate(gt_annotations):
ann["id"] = i + 1
if label_map:
gt_categories = [{"id": i, "name": label_map[i]} for i in label_map]
else:
category_ids = [gt["category_id"] for gt in gt_annotations]
gt_categories = [{"id": i} for i in set(category_ids)]
gt_dataset = {
"images": gt_images,
"categories": gt_categories,
"annotations": copy.deepcopy(gt_annotations),
}
return gt_dataset
def _concat_numpy(groundtruths, predictions):
"""Converts tensors to numpy arrays."""
numpy_groundtruths = {}
for key, val in groundtruths.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_groundtruths[key] = val
numpy_predictions = {}
for key, val in predictions.items():
if isinstance(val, tuple):
val = np.concatenate(val)
numpy_predictions[key] = val
return numpy_groundtruths, numpy_predictions
def compute_pycoco_metrics(groundtruths, predictions):
assert_pycocotools_installed("compute_pycoco_metrics")
groundtruths, predictions = _concat_numpy(groundtruths, predictions)
gt_dataset = _convert_groundtruths_to_coco_dataset(groundtruths)
coco_gt = PyCOCOWrapper(gt_dataset=gt_dataset)
coco_predictions = _convert_predictions_to_coco_annotations(predictions)
coco_dt = coco_gt.loadRes(predictions=coco_predictions)
image_ids = [ann["image_id"] for ann in coco_predictions]
coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
metrics = coco_metrics
metrics_dict = {}
for i, name in enumerate(METRIC_NAMES):
metrics_dict[name] = metrics[i].astype(np.float32)
return metrics_dict
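# ---------------------------------------------------------------------------
# Editor's note: a small illustrative check, not part of the original file.
# It only exercises the pure-numpy box conversion above and does not require
# pycocotools to be installed.
if __name__ == "__main__":
    boxes_yxyx = np.array([[10.0, 20.0, 50.0, 80.0]])  # [ymin, xmin, ymax, xmax]
    # Expected output: [[20., 10., 60., 40.]] in [xmin, ymin, width, height].
    print(_yxyx_to_xywh(boxes_yxyx))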
| keras-cv/keras_cv/metrics/coco/pycoco_wrapper.py/0 | {
"file_path": "keras-cv/keras_cv/metrics/coco/pycoco_wrapper.py",
"repo_id": "keras-cv",
"token_count": 3808
} | 23 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_aliases import (
EfficientNetV1B0Backbone,
)
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone import (
EfficientNetV1Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
@pytest.mark.extra_large
class EfficientNetV1PresetFullTest(TestCase):
"""
    Test the full enumeration of our presets.
    This tests every preset for EfficientNetV1 and is only run manually.
Run with:
`pytest keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py --run_extra_large`
""" # noqa: E501
@parameterized.named_parameters(
*[(preset, preset) for preset in EfficientNetV1Backbone.presets]
)
def test_load_efficientnet(self, preset):
input_data = np.ones(shape=(2, 224, 224, 3))
model = EfficientNetV1Backbone.from_preset(preset)
model(input_data)
def test_efficientnet_feature_extractor(self):
model = EfficientNetV1B0Backbone(
include_rescaling=False,
input_shape=[256, 256, 3],
)
levels = ["P3", "P4"]
layer_names = [model.pyramid_level_inputs[level] for level in levels]
backbone_model = get_feature_extractor(model, layer_names, levels)
inputs = keras.Input(shape=[256, 256, 3])
outputs = backbone_model(inputs)
self.assertLen(outputs, 2)
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(outputs["P3"].shape[:3], (None, 32, 32))
self.assertEquals(outputs["P4"].shape[:3], (None, 16, 16))
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 872
} | 24 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNet v3 backbone model.
References:
- [Searching for MobileNetV3](https://arxiv.org/pdf/1905.02244.pdf)
(ICCV 2019)
- [Based on the original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
""" # noqa: E501
import copy
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
CHANNEL_AXIS = -1
BN_EPSILON = 1e-3
BN_MOMENTUM = 0.999
@keras_cv_export("keras_cv.models.MobileNetV3Backbone")
class MobileNetV3Backbone(Backbone):
"""Instantiates the MobileNetV3 architecture.
References:
- [Searching for MobileNetV3](https://arxiv.org/pdf/1905.02244.pdf)
(ICCV 2019)
- [Based on the Original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_expansion: list of ints or floats, the expansion ratio for
each inverted residual block in the model.
stackwise_filters: list of ints, number of filters for each inverted
residual block in the model.
stackwise_stride: list of ints, stride length for each inverted
residual block in the model.
include_rescaling: bool, whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(scale=1 / 255)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
alpha: float, controls the width of the network. This is known as the
depth multiplier in the MobileNetV3 paper, but the name is kept for
consistency with MobileNetV1 in Keras.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone with a custom config
model = MobileNetV3Backbone(
stackwise_expansion=[1, 72.0 / 16, 88.0 / 24, 4, 6, 6, 3, 3, 6, 6, 6],
stackwise_filters=[16, 24, 24, 40, 40, 40, 48, 48, 96, 96, 96],
stackwise_kernel_size=[3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5],
stackwise_stride=[2, 2, 1, 2, 1, 1, 1, 1, 2, 1, 1],
stackwise_se_ratio=[0.25, None, None, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25],
stackwise_activation=["relu", "relu", "relu", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish", "hard_swish"],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
*,
stackwise_expansion,
stackwise_filters,
stackwise_kernel_size,
stackwise_stride,
stackwise_se_ratio,
stackwise_activation,
include_rescaling,
input_shape=(None, None, 3),
input_tensor=None,
alpha=1.0,
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(scale=1 / 255)(x)
x = keras.layers.Conv2D(
16,
kernel_size=3,
strides=(2, 2),
padding="same",
use_bias=False,
name="Conv",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name="Conv_BatchNorm",
)(x)
x = apply_hard_swish(x)
pyramid_level_inputs = []
for stack_index in range(len(stackwise_filters)):
if stackwise_stride[stack_index] != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
x = apply_inverted_res_block(
x,
expansion=stackwise_expansion[stack_index],
filters=adjust_channels(
(stackwise_filters[stack_index]) * alpha
),
kernel_size=stackwise_kernel_size[stack_index],
stride=stackwise_stride[stack_index],
se_ratio=stackwise_se_ratio[stack_index],
activation=stackwise_activation[stack_index],
expansion_index=stack_index,
)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
last_conv_ch = adjust_channels(x.shape[CHANNEL_AXIS] * 6)
x = keras.layers.Conv2D(
last_conv_ch,
kernel_size=1,
padding="same",
use_bias=False,
name="Conv_1",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name="Conv_1_BatchNorm",
)(x)
x = apply_hard_swish(x)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_expansion = stackwise_expansion
self.stackwise_filters = stackwise_filters
self.stackwise_kernel_size = stackwise_kernel_size
self.stackwise_stride = stackwise_stride
self.stackwise_se_ratio = stackwise_se_ratio
self.stackwise_activation = stackwise_activation
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.alpha = alpha
def get_config(self):
config = super().get_config()
config.update(
{
"stackwise_expansion": self.stackwise_expansion,
"stackwise_filters": self.stackwise_filters,
"stackwise_kernel_size": self.stackwise_kernel_size,
"stackwise_stride": self.stackwise_stride,
"stackwise_se_ratio": self.stackwise_se_ratio,
"stackwise_activation": self.stackwise_activation,
"include_rescaling": self.include_rescaling,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"alpha": self.alpha,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
class HardSigmoidActivation(keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, x):
return apply_hard_sigmoid(x)
def get_config(self):
return super().get_config()
def adjust_channels(x, divisor=8, min_value=None):
"""Ensure that all layers have a channel number divisible by the `divisor`.
Args:
x: integer, input value.
divisor: integer, the value by which a channel number should be
divisible, defaults to 8.
min_value: float, optional minimum value for the new tensor. If None,
defaults to value of divisor.
Returns:
the updated input scalar.
"""
if min_value is None:
min_value = divisor
new_x = max(min_value, int(x + divisor / 2) // divisor * divisor)
# make sure that round down does not go down by more than 10%.
if new_x < 0.9 * x:
new_x += divisor
return new_x
def apply_hard_sigmoid(x):
activation = keras.layers.ReLU(6.0)
return activation(x + 3.0) * (1.0 / 6.0)
def apply_hard_swish(x):
return keras.layers.Multiply()([x, apply_hard_sigmoid(x)])
def apply_inverted_res_block(
x,
expansion,
filters,
kernel_size,
stride,
se_ratio,
activation,
expansion_index,
):
"""An Inverted Residual Block.
Args:
x: input tensor.
expansion: integer, the expansion ratio, multiplied with infilters to
get the minimum value passed to adjust_channels.
filters: integer, number of filters for convolution layer.
kernel_size: integer, the kernel size for DepthWise Convolutions.
stride: integer, the stride length for DepthWise Convolutions.
se_ratio: float, ratio for bottleneck filters. Number of bottleneck
filters = filters * se_ratio.
activation: the activation layer to use.
expansion_index: integer, a unique identification if you want to use
expanded convolutions. If greater than 0, an additional Conv+BN
layer is added after the expanded convolutional layer.
Returns:
the updated input tensor.
"""
if isinstance(activation, str):
if activation == "hard_swish":
activation = apply_hard_swish
else:
activation = keras.activations.get(activation)
shortcut = x
prefix = "expanded_conv_"
infilters = x.shape[CHANNEL_AXIS]
if expansion_index > 0:
prefix = f"expanded_conv_{expansion_index}_"
x = keras.layers.Conv2D(
adjust_channels(infilters * expansion),
kernel_size=1,
padding="same",
use_bias=False,
name=prefix + "expand",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "expand_BatchNorm",
)(x)
x = activation(x)
if stride == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=prefix + "depthwise_pad",
)(x)
x = keras.layers.DepthwiseConv2D(
kernel_size,
strides=stride,
padding="same" if stride == 1 else "valid",
use_bias=False,
name=prefix + "depthwise",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "depthwise_BatchNorm",
)(x)
x = activation(x)
if se_ratio:
se_filters = adjust_channels(infilters * expansion)
x = cv_layers.SqueezeAndExcite2D(
filters=se_filters,
bottleneck_filters=adjust_channels(se_filters * se_ratio),
squeeze_activation="relu",
excite_activation=HardSigmoidActivation(),
)(x)
x = keras.layers.Conv2D(
filters,
kernel_size=1,
padding="same",
use_bias=False,
name=prefix + "project",
)(x)
x = keras.layers.BatchNormalization(
axis=CHANNEL_AXIS,
epsilon=BN_EPSILON,
momentum=BN_MOMENTUM,
name=prefix + "project_BatchNorm",
)(x)
if stride == 1 and infilters == filters:
x = keras.layers.Add(name=prefix + "Add")([shortcut, x])
return x
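# ---------------------------------------------------------------------------
# Editor's note: an illustrative addition, not part of the original file. It
# spells out how `adjust_channels` rounds widths to the nearest multiple of
# the divisor while never shrinking a layer by more than ~10%.
if __name__ == "__main__":
    print(adjust_channels(32))  # 32: already a multiple of 8
    print(adjust_channels(33))  # 32: rounds down, still within 10% of 33
    print(adjust_channels(30))  # 32: rounds up to the nearest multiple of 8
    print(adjust_channels(8, divisor=6))  # 12: 6 would shrink 8 by >10%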
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_backbone.py",
"repo_id": "keras-cv",
"token_count": 5692
} | 25 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
CrossStagePartial,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
class YoloXPAFPN(keras.layers.Layer):
"""The YoloX PAFPN.
YoloX PAFPN is an FPN layer used in YoloX models. The YoloX PAFPN is based
on the feature pyramid module used in Path Aggregation networks (PANet).
Arguments:
        depth_multiplier: A float value used to calculate the base depth of
            the model; this changes based on the detection model being used.
            Defaults to 1.0.
        width_multiplier: A float value used to calculate the base width of
            the model; this changes based on the detection model being used.
            Defaults to 1.0.
        in_channels: A list representing the number of filters in the FPN
            output. The length of the list will be the same as the number of
            outputs. Defaults to `(256, 512, 1024)`.
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block. Defaults to
`False`.
activation: the activation applied after the BatchNorm layer. One of
`"silu"`, `"relu"` or `"leaky_relu"`. Defaults to `"silu"`.
"""
def __init__(
self,
depth_multiplier=1.0,
width_multiplier=1.0,
in_channels=(256, 512, 1024),
use_depthwise=False,
activation="silu",
**kwargs
):
super().__init__(**kwargs)
self.in_channels = in_channels
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
self.lateral_conv0 = DarknetConvBlock(
filters=int(in_channels[1] * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
self.C3_p4 = CrossStagePartial(
filters=int(in_channels[1] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.reduce_conv1 = DarknetConvBlock(
filters=int(in_channels[0] * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
self.C3_p3 = CrossStagePartial(
filters=int(in_channels[0] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.bu_conv2 = ConvBlock(
filters=int(in_channels[0] * width_multiplier),
kernel_size=3,
strides=2,
activation=activation,
)
self.C3_n3 = CrossStagePartial(
filters=int(in_channels[1] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.bu_conv1 = ConvBlock(
filters=int(in_channels[1] * width_multiplier),
kernel_size=3,
strides=2,
activation=activation,
)
self.C3_n4 = CrossStagePartial(
filters=int(in_channels[2] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.concat = keras.layers.Concatenate(axis=-1)
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=False):
c3_output, c4_output, c5_output = inputs[3], inputs[4], inputs[5]
fpn_out0 = self.lateral_conv0(c5_output)
f_out0 = self.upsample_2x(fpn_out0)
f_out0 = self.concat([f_out0, c4_output])
f_out0 = self.C3_p4(f_out0)
fpn_out1 = self.reduce_conv1(f_out0)
f_out1 = self.upsample_2x(fpn_out1)
f_out1 = self.concat([f_out1, c3_output])
pan_out2 = self.C3_p3(f_out1)
p_out1 = self.bu_conv2(pan_out2)
p_out1 = self.concat([p_out1, fpn_out1])
pan_out1 = self.C3_n3(p_out1)
p_out0 = self.bu_conv1(pan_out1)
p_out0 = self.concat([p_out0, fpn_out0])
pan_out0 = self.C3_n4(p_out0)
return pan_out2, pan_out1, pan_out0
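# ---------------------------------------------------------------------------
# Editor's note: an illustrative usage sketch, not part of the original file.
# The feature shapes below are assumptions matching what a CSPDarkNet-style
# backbone would emit for a 640x640 input (strides 8, 16 and 32).
if __name__ == "__main__":
    import tensorflow as tf

    features = {
        3: tf.random.uniform([1, 80, 80, 256]),
        4: tf.random.uniform([1, 40, 40, 512]),
        5: tf.random.uniform([1, 20, 20, 1024]),
    }
    fpn = YoloXPAFPN()
    p3, p4, p5 = fpn(features)
    # Expected shapes: (1, 80, 80, 256), (1, 40, 40, 512), (1, 20, 20, 1024).
    print(p3.shape, p4.shape, p5.shape)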
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn.py",
"repo_id": "keras-cv",
"token_count": 2419
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import DeepLabV3Plus
from keras_cv.models import ResNet18V2Backbone
from keras_cv.models.backbones.test_backbone_presets import (
test_backbone_presets,
)
from keras_cv.tests.test_case import TestCase
class DeepLabV3PlusTest(TestCase):
def test_deeplab_v3_plus_construction(self):
backbone = ResNet18V2Backbone(input_shape=[256, 256, 3])
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
@pytest.mark.large
def test_deeplab_v3_plus_call(self):
backbone = ResNet18V2Backbone(input_shape=[256, 256, 3])
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
images = np.random.uniform(size=(2, 256, 256, 3))
_ = model(images)
_ = model.predict(images)
@pytest.mark.large
def test_weights_change(self):
target_size = [256, 256, 3]
images = np.ones([1] + target_size)
labels = np.random.uniform(size=[1] + target_size)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.repeat(2)
ds = ds.batch(2)
backbone = ResNet18V2Backbone(input_shape=target_size)
model = DeepLabV3Plus(backbone=backbone, num_classes=3)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
original_weights = model.segmentation_head.get_weights()
model.fit(ds, epochs=1)
updated_weights = model.segmentation_head.get_weights()
for w1, w2 in zip(original_weights, updated_weights):
self.assertNotAllEqual(w1, w2)
self.assertFalse(ops.any(ops.isnan(w2)))
@pytest.mark.large
def test_with_model_preset_forward_pass(self):
if not keras_3():
self.skipTest("TODO: #2246 Not supported for Keras 2")
model = DeepLabV3Plus.from_preset(
"deeplab_v3_plus_resnet50_pascalvoc",
num_classes=21,
input_shape=[256, 256, 3],
)
image = np.ones((1, 256, 256, 3))
output = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
expected_output = np.zeros((1, 256, 256, 1))
self.assertAllClose(output, expected_output)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
target_size = [256, 256, 3]
backbone = ResNet18V2Backbone(input_shape=target_size)
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, DeepLabV3Plus)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large
class DeepLabV3PlusSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in test_backbone_presets]
)
def test_backbone_preset(self, preset):
model = DeepLabV3Plus.from_preset(
preset,
num_classes=3,
)
xs = np.random.uniform(size=(1, 128, 128, 3))
output = model(xs)
self.assertEqual(output.shape, (1, 128, 128, 3))
| keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus_test.py",
"repo_id": "keras-cv",
"token_count": 1945
} | 27 |
# Stable Diffusion v1-4 Model Card
Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input.
For more information about how Stable Diffusion functions, please have a look at [KerasCV's tutorial covering StableDiffusion](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/).
The **Stable-Diffusion-v1-4** checkpoint was initialized with the weights of the [Stable-Diffusion-v1-2](https://huggingface.co/CompVis/stable-diffusion-v1-2)
checkpoint and subsequently fine-tuned on 225k steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
By loading this model you accept the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE
## Model Details
- **Developed by:** Robin Rombach, Patrick Esser
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
- **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752).
- **Cite as:**
@InProceedings{Rombach_2022_CVPR,
author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
title = {High-Resolution Image Synthesis With Latent Diffusion Models},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2022},
pages = {10684-10695}
}
# Uses
## Direct Use
The model is intended for research purposes only. Possible research areas and
tasks include
- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.
Excluded uses are described below.
### Misuse, Malicious Use, and Out-of-Scope Use
_Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_.
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
#### Out-of-Scope Use
The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
#### Misuse and Malicious Use
Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation
- Representations of egregious violence and gore
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
## Limitations and Bias
### Limitations
- The model does not achieve perfect photorealism
- The model cannot render legible text
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The auto-encoding part of the model is lossy
- The model was trained on a large-scale dataset
[LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material
and is not fit for product use without additional safety mechanisms and
considerations.
- No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data.
The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images.
### Bias
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
which consists of images that are primarily limited to English descriptions.
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
## More information
More information on StableDiffusion can be found in the [HuggingFace model card](https://huggingface.co/CompVis/stable-diffusion-v1-4)
| keras-cv/keras_cv/models/stable_diffusion/README.md/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/README.md",
"repo_id": "keras-cv",
"token_count": 1753
} | 28 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from keras_cv.layers import preprocessing
from keras_cv.losses import SimCLRLoss
from keras_cv.models import DenseNet121Backbone
from keras_cv.tests.test_case import TestCase
from keras_cv.training import ContrastiveTrainer
# TODO(jbischof): revisit "extra_large" tag once development resumes.
# These tests are currently some of the slowest in our repo.
@pytest.mark.extra_large
class ContrastiveTrainerTest(TestCase):
def test_probe_requires_probe_optimizer(self):
trainer = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(),
)
with self.assertRaises(ValueError):
trainer.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
def test_targets_required_if_probing(self):
trainer_with_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(),
)
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
trainer_with_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
probe_optimizer=optimizers.Adam(),
probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True),
)
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
with self.assertRaises(ValueError):
trainer_with_probing.fit(images)
def test_train_with_probing(self):
trainer_with_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(num_classes=20),
)
images = tf.random.uniform((1, 50, 50, 3))
targets = np.ones((1, 20))
trainer_with_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
probe_metrics=[
metrics.TopKCategoricalAccuracy(3, "top3_probe_accuracy")
],
probe_optimizer=optimizers.Adam(),
probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True),
)
trainer_with_probing.fit(images, targets)
def test_train_without_probing(self):
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
targets = np.ones((1, 20))
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
trainer_without_probing.fit(images)
trainer_without_probing.fit(images, targets)
def test_inference_not_supported(self):
trainer = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
trainer.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
with self.assertRaises(NotImplementedError):
trainer(np.ones((1, 50, 50, 3)))
def test_encoder_must_have_flat_output(self):
with self.assertRaises(ValueError):
_ = ContrastiveTrainer(
# A DenseNet without pooling does not have a flat output
encoder=DenseNet121Backbone(include_rescaling=False),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
def test_with_multiple_augmenters_and_projectors(self):
augmenter0 = preprocessing.RandomFlip("horizontal")
augmenter1 = preprocessing.RandomFlip("vertical")
projector0 = layers.Dense(64, name="projector0")
projector1 = keras.Sequential(
[projector0, layers.ReLU(), layers.Dense(64, name="projector1")]
)
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=(augmenter0, augmenter1),
projector=(projector0, projector1),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
trainer_without_probing.fit(images)
def build_augmenter(self):
return preprocessing.RandomFlip("horizontal")
def build_encoder(self):
return keras.Sequential(
[
DenseNet121Backbone(include_rescaling=False),
layers.GlobalAveragePooling2D(name="avg_pool"),
],
)
def build_projector(self):
return layers.Dense(128)
def build_probe(self, num_classes=20):
return layers.Dense(num_classes)
| keras-cv/keras_cv/training/contrastive/contrastive_trainer_test.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/contrastive_trainer_test.py",
"repo_id": "keras-cv",
"token_count": 2856
} | 29 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from tensorflow import keras
from keras_cv import core
def exhaustive_compare(obj1, obj2):
"""Exhaustively compared config of any two python
or Keras objects recursively.
If objects are python objects, a standard equality check is run.
If the objects are Keras objects a `get_config()` call is made.
The subsequent configs are then compared to determine if equality holds.
Args:
obj1: any object, can be a Keras object or python object.
obj2: any object, can be a Keras object or python object.
"""
classes_supporting_get_config = (
core.FactorSampler,
keras.layers.Layer,
keras.losses.Loss,
)
# If both objects are either one of list or tuple then their individual
# elements also must be checked exhaustively.
if isinstance(obj1, (list, tuple)) and isinstance(obj2, (list, tuple)):
# Length based checks.
if len(obj1) == 0 and len(obj2) == 0:
return True
if len(obj1) != len(obj2):
return False
# Exhaustive check for all elements.
        for v1, v2 in zip(obj1, obj2):
            if not exhaustive_compare(v1, v2):
                return False
        return True
# If the objects are dicts then we simply call the `config_equals` function
# which supports dicts.
elif isinstance(obj1, (dict)) and isinstance(obj2, (dict)):
        return config_equals(obj1, obj2)
# If both objects are subclasses of Keras classes that support `get_config`
# method, then we compare their individual attributes using `config_equals`.
elif isinstance(obj1, classes_supporting_get_config) and isinstance(
obj2, classes_supporting_get_config
):
return config_equals(obj1.get_config(), obj2.get_config())
# Following checks are if either of the objects are _functions_, not methods
# or callables, since Layers and other unforeseen objects may also fit into
# this category. Specifically for Keras activation functions.
elif inspect.isfunction(obj1) and inspect.isfunction(obj2):
return keras.utils.serialize_keras_object(
obj1
) == keras.utils.serialize_keras_object(obj2)
elif inspect.isfunction(obj1) and not inspect.isfunction(obj2):
return keras.utils.serialize_keras_object(obj1) == obj2
elif inspect.isfunction(obj2) and not inspect.isfunction(obj1):
return obj1 == keras.utils.serialize_keras_object(obj2)
# Lastly check for primitive datatypes and objects that don't need
# additional preprocessing.
else:
return obj1 == obj2
def config_equals(config1, config2):
# Both `config1` and `config2` are python dicts. So the first check is to
# see if both of them have same keys.
if config1.keys() != config2.keys():
return False
# Iterate over all keys of the configs and compare each entry exhaustively.
for key in list(config1.keys()):
v1, v2 = config1[key], config2[key]
if not exhaustive_compare(v1, v2):
return False
return True
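

# Illustrative sketch (not part of the original test utilities): comparing the
# configs of two independently constructed Keras layers. `Dense` is just an
# arbitrary example layer; after dropping the auto-generated names, the two
# configs are expected to compare equal.
def _example_config_equals():
    layer_a = keras.layers.Dense(8, activation="relu")
    layer_b = keras.layers.Dense(8, activation="relu")
    config_a = layer_a.get_config()
    config_b = layer_b.get_config()
    # The auto-generated layer names differ, so remove them before comparing.
    config_a.pop("name")
    config_b.pop("name")
    return config_equals(config_a, config_b)  # expected: True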
| keras-cv/keras_cv/utils/test_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/test_utils.py",
"repo_id": "keras-cv",
"token_count": 1280
} | 30 |
sh_binary(
name = "build_pip_pkg",
srcs = ["build_deps/build_pip_pkg.sh"],
data = [
"LICENSE",
"MANIFEST.in",
"README.md",
"setup.cfg",
"setup.py",
"//keras_cv",
],
)
| keras-cv/BUILD.bazel/0 | {
"file_path": "keras-cv/BUILD.bazel",
"repo_id": "keras-cv",
"token_count": 138
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import JitteredResize
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
BOUNDING_BOXES,
)
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
IMAGES,
)
from keras_cv.utils import preprocessing as preprocessing_utils
class OldJitteredResize(BaseImageAugmentationLayer):
"""JitteredResize implements resize with scale distortion.
JitteredResize takes a three-step approach to size-distortion based image
augmentation. This technique is specifically tuned for object detection
pipelines. The layer takes an input of images and bounding boxes, both of
which may be ragged. It outputs a dense image tensor, ready to feed to a
model for training. As such this layer will commonly be the final step in an
augmentation pipeline.
The augmentation process is as follows:
The image is first scaled according to a randomly sampled scale factor. The
width and height of the image are then resized according to the sampled
scale. This is done to introduce noise into the local scale of features in
the image. A subset of the image is then cropped randomly according to
`crop_size`. This crop is then padded to be `target_size`. Bounding boxes
are translated and scaled according to the random scaling and random
cropping.
Usage:
```python
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3)
# an example using crop size
train_ds = load_object_detection_dataset()
jittered_resize = layers.JitteredResize(
target_size=(640, 640),
crop_size=(250, 250),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
)
train_ds = train_ds.map(
jittered_resize, num_parallel_calls=tf.data.AUTOTUNE
)
# images now are (640, 640, 3), but they were resized from a 250x250 crop.
```
Args:
target_size: A tuple representing the output size of images.
scale_factor: A tuple of two floats or a `keras_cv.FactorSampler`. For
each augmented image a value is sampled from the provided range.
This factor is used to scale the input image.
To replicate the results of the MaskRCNN paper pass `(0.8, 1.25)`.
crop_size: (Optional) the size of the image to crop from the scaled
image, defaults to `target_size` when not provided.
bounding_box_format: The format of bounding boxes of input boxes.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
interpolation: String, the interpolation method, defaults to
`"bilinear"`. Supports `"bilinear"`, `"nearest"`, `"bicubic"`,
`"area"`, `"lanczos3"`, `"lanczos5"`, `"gaussian"`,
`"mitchellcubic"`.
seed: (Optional) integer to use as the random seed.
"""
def __init__(
self,
target_size,
scale_factor,
crop_size=None,
bounding_box_format=None,
interpolation="bilinear",
seed=None,
**kwargs,
):
super().__init__(**kwargs)
if not isinstance(target_size, tuple) or len(target_size) != 2:
raise ValueError(
"JitteredResize() expects `target_size` to be a tuple of two "
f"integers. Received `target_size={target_size}`"
)
crop_size = crop_size or target_size
self.interpolation = preprocessing_utils.get_interpolation(
interpolation
)
self.scale_factor = preprocessing_utils.parse_factor(
scale_factor,
min_value=0.0,
max_value=None,
param_name="scale_factor",
seed=seed,
)
self.crop_size = crop_size
self.target_size = target_size
self.bounding_box_format = bounding_box_format
self.seed = seed
self.force_output_dense_images = True
self.auto_vectorize = False
def get_random_transformation(self, image=None, **kwargs):
original_image_shape = tf.shape(image)
image_shape = tf.cast(original_image_shape[0:2], tf.float32)
scaled_size = tf.round(image_shape * self.scale_factor())
scale = tf.minimum(
scaled_size[0] / image_shape[0], scaled_size[1] / image_shape[1]
)
scaled_size = tf.round(image_shape * scale)
image_scale = scaled_size / image_shape
max_offset = scaled_size - self.crop_size
max_offset = tf.where(
tf.less(max_offset, 0), tf.zeros_like(max_offset), max_offset
)
offset = max_offset * tf.random.uniform([2], minval=0, maxval=1)
offset = tf.cast(offset, tf.int32)
return {
"original_size": original_image_shape,
"image_scale": image_scale,
"scaled_size": scaled_size,
"offset": offset,
}
def compute_image_signature(self, images):
return tf.TensorSpec(
shape=list(self.target_size) + [images.shape[-1]],
dtype=self.compute_dtype,
)
def augment_image(self, image, transformation, **kwargs):
# unpackage augmentation arguments
scaled_size = transformation["scaled_size"]
offset = transformation["offset"]
target_size = self.target_size
crop_size = self.crop_size
scaled_image = tf.image.resize(
image, tf.cast(scaled_size, tf.int32), method=self.interpolation
)
scaled_image = scaled_image[
offset[0] : offset[0] + crop_size[0],
offset[1] : offset[1] + crop_size[1],
:,
]
scaled_image = tf.image.pad_to_bounding_box(
scaled_image, 0, 0, target_size[0], target_size[1]
)
return tf.cast(scaled_image, self.compute_dtype)
def augment_bounding_boxes(self, bounding_boxes, transformation, **kwargs):
if self.bounding_box_format is None:
raise ValueError(
"Please provide a `bounding_box_format` when augmenting "
"bounding boxes with `JitteredResize()`."
)
result = bounding_boxes.copy()
image_scale = tf.cast(transformation["image_scale"], self.compute_dtype)
offset = tf.cast(transformation["offset"], self.compute_dtype)
original_size = transformation["original_size"]
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
image_shape=original_size,
source=self.bounding_box_format,
target="yxyx",
)
# Adjusts box coordinates based on image_scale and offset.
yxyx = bounding_boxes["boxes"]
yxyx *= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
yxyx -= tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
result["boxes"] = yxyx
result = bounding_box.clip_to_image(
result,
image_shape=self.target_size + (3,),
bounding_box_format="yxyx",
)
result = bounding_box.convert_format(
result,
image_shape=self.target_size + (3,),
source="yxyx",
target=self.bounding_box_format,
)
return result
def augment_label(self, label, transformation, **kwargs):
return label
def get_config(self):
config = super().get_config()
config.update(
{
"target_size": self.target_size,
"scale_factor": self.scale_factor,
"crop_size": self.crop_size,
"bounding_box_format": self.bounding_box_format,
"interpolation": self.interpolation,
"seed": self.seed,
}
)
return config
class JitteredResizeTest(tf.test.TestCase):
def test_consistency_with_old_impl(self):
target_size = (32, 32)
fixed_scale_factor = (3 / 4, 3 / 4)
image = tf.random.uniform(shape=(1, 64, 64, 3)) * 255.0
layer = JitteredResize(
target_size=target_size,
scale_factor=fixed_scale_factor,
)
old_layer = OldJitteredResize(
target_size=target_size,
scale_factor=fixed_scale_factor,
)
# makes offsets fixed to (0.5, 0.5)
with unittest.mock.patch.object(
layer._random_generator,
"uniform",
return_value=tf.convert_to_tensor([[0.5, 0.5]]),
):
output = layer(image)
with unittest.mock.patch.object(
tf.random,
"uniform",
return_value=tf.convert_to_tensor([0.5, 0.5]),
):
old_output = old_layer(image)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
is_inputs_containing_bounding_boxes = True
num_images = [100, 200, 500, 1000]
results = {}
aug_candidates = [JitteredResize, OldJitteredResize]
aug_args = {
"target_size": (30, 30),
"scale_factor": (3 / 4, 4 / 3),
"bounding_box_format": "xyxy",
}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
layer(inputs)
t0 = time.time()
r1 = layer(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
inputs = {IMAGES: x_train[:n_images]}
if is_inputs_containing_bounding_boxes:
inputs.update(
{
BOUNDING_BOXES: {
"classes": tf.zeros(shape=(n_images, 4)),
"boxes": tf.zeros(shape=(n_images, 4, 4)),
}
}
)
# warmup
apply_aug(inputs)
t0 = time.time()
r1 = apply_aug(inputs)
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# tf.map_fn while_loop cannot run on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_jittered_resize.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_jittered_resize.py",
"repo_id": "keras-cv",
"token_count": 5992
} | 1 |
#!/usr/bin/env bash
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Builds a wheel of KerasCV for Pip. Requires Bazel.
# Adapted from https://github.com/tensorflow/addons/blob/master/build_deps/build_pip_pkg.sh
set -e
set -x
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
function is_windows() {
if [[ "${PLATFORM}" =~ (cygwin|mingw32|mingw64|msys)_nt* ]]; then
true
else
false
fi
}
if is_windows; then
PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.exe.runfiles/__main__/"
else
PIP_FILE_PREFIX="bazel-bin/build_pip_pkg.runfiles/__main__/"
fi
function main() {
while [[ ! -z "${1}" ]]; do
if [[ ${1} == "make" ]]; then
echo "Using Makefile to build pip package."
PIP_FILE_PREFIX=""
else
DEST=${1}
fi
shift
done
if [[ -z ${DEST} ]]; then
echo "No destination dir provided"
exit 1
fi
# Create the directory, then do dirname on a non-existent file inside it to
# give us an absolute paths with tilde characters resolved to the destination
# directory.
mkdir -p ${DEST}
if [[ ${PLATFORM} == "darwin" ]]; then
DEST=$(pwd -P)/${DEST}
else
DEST=$(readlink -f "${DEST}")
fi
echo "=== destination directory: ${DEST}"
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
echo $(date) : "=== Using tmpdir: ${TMPDIR}"
echo "=== Copy KerasCV Custom op files"
cp ${PIP_FILE_PREFIX}setup.cfg "${TMPDIR}"
cp ${PIP_FILE_PREFIX}setup.py "${TMPDIR}"
cp ${PIP_FILE_PREFIX}MANIFEST.in "${TMPDIR}"
cp ${PIP_FILE_PREFIX}README.md "${TMPDIR}"
cp ${PIP_FILE_PREFIX}LICENSE "${TMPDIR}"
if is_windows; then
from=$(cygpath -w ${PIP_FILE_PREFIX}keras_cv)
to=$(cygpath -w "${TMPDIR}"/keras_cv)
start robocopy //S "${from}" "${to}" //xf *_test.py
sleep 5
else
rsync -avm -L --exclude='*_test.py' ${PIP_FILE_PREFIX}keras_cv "${TMPDIR}"
fi
pushd ${TMPDIR}
echo $(date) : "=== Building wheel"
python setup.py bdist_wheel > /dev/null
cp dist/*.whl "${DEST}"
popd
rm -rf ${TMPDIR}
echo $(date) : "=== Output wheel file is in: ${DEST}"
}
main "$@"
| keras-cv/build_deps/build_pip_pkg.sh/0 | {
"file_path": "keras-cv/build_deps/build_pip_pkg.sh",
"repo_id": "keras-cv",
"token_count": 1026
} | 2 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for preprocessing demos."""
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import backend
image_size = 512
BATCH_SIZE = 32
AUTOTUNE = tf.data.AUTOTUNE
mean = tf.constant([0.485, 0.456, 0.406])
std = tf.constant([0.229, 0.224, 0.225])
def normalize(input_image, input_mask):
input_image = tf.image.convert_image_dtype(input_image, tf.float32)
input_image = (input_image - mean) / tf.maximum(std, backend.epsilon())
input_image = input_image / 255
input_mask -= 1
return input_image, input_mask
def to_dict(datapoint):
input_image = tf.image.resize(datapoint["image"], (image_size, image_size))
input_mask = tf.image.resize(
datapoint["segmentation_mask"],
(image_size, image_size),
method="bilinear",
)
input_image, input_mask = normalize(input_image, input_mask)
input_mask = tf.one_hot(
tf.squeeze(tf.cast(input_mask, tf.int32), axis=-1), depth=3
)
return {"images": input_image, "segmentation_masks": input_mask}
def load_oxford_iiit_pet_dataset():
data, ds_info = tfds.load("oxford_iiit_pet:3.*.*", with_info=True)
print("Dataset info: ", ds_info)
dataset = data["train"]
return (
dataset.shuffle(10 * BATCH_SIZE)
.map(to_dict, num_parallel_calls=AUTOTUNE)
.batch(BATCH_SIZE)
)
def display(display_list):
plt.figure(figsize=(6, 6))
title = ["Input Image", "True Mask", "Predicted Mask"]
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(tf.keras.utils.array_to_img(display_list[i]))
plt.axis("off")
plt.show()
def visualize_dataset(ds):
for samples in ds.take(1):
sample_image, sample_mask = (
samples["images"][0],
samples["segmentation_masks"][0],
)
display([sample_image, sample_mask])
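

# Illustrative sketch (not part of the original demo utilities): a minimal
# driver that loads the Oxford-IIIT Pet dataset with the preprocessing above
# and previews one training batch. Assumes TFDS can download or locate the
# dataset locally.
if __name__ == "__main__":
    demo_ds = load_oxford_iiit_pet_dataset()
    visualize_dataset(demo_ds)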
| keras-cv/examples/layers/preprocessing/segmentation/demo_utils.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/segmentation/demo_utils.py",
"repo_id": "keras-cv",
"token_count": 1010
} | 3 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Train an Object Detection Model on Pascal VOC 2007 using KerasCV
Author: [lukewood](https://github.com/LukeWood), [tanzhenyu](https://github.com/tanzhenyu)
Date created: 2022/09/27
Last modified: 2023/03/29
Description: Use KerasCV to train a RetinaNet on Pascal VOC 2007.
""" # noqa: E501
import resource
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
import tqdm
from absl import flags
from tensorflow import keras
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
flags.DEFINE_integer(
"epochs",
100,
"Number of epochs to run for.",
)
flags.DEFINE_string(
"weights_name",
"weights_{epoch:02d}.weights.h5",
"Directory which will be used to store weight checkpoints.",
)
flags.DEFINE_string(
"tensorboard_path",
"logs",
"Directory which will be used to store tensorboard logs.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
# parameters from RetinaNet [paper](https://arxiv.org/abs/1708.02002)
# Try to detect an available TPU. If none is present, defaults to
# MirroredStrategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
except ValueError:
# MirroredStrategy is best for a single machine with one or multiple GPUs
strategy = tf.distribute.MirroredStrategy()
BATCH_SIZE = 4
GLOBAL_BATCH_SIZE = BATCH_SIZE * strategy.num_replicas_in_sync
BASE_LR = 0.005 * GLOBAL_BATCH_SIZE / 16
print("Number of accelerators: ", strategy.num_replicas_in_sync)
print("Global Batch Size: ", GLOBAL_BATCH_SIZE)
IMG_SIZE = 640
image_size = [IMG_SIZE, IMG_SIZE, 3]
train_ds = tfds.load(
"voc/2007", split="train+validation", with_info=False, shuffle_files=True
)
train_ds = train_ds.concatenate(
tfds.load(
"voc/2012",
split="train+validation",
with_info=False,
shuffle_files=True,
)
)
eval_ds = tfds.load("voc/2007", split="test", with_info=False)
def unpackage_tfds_inputs(inputs, bounding_box_format):
image = inputs["image"]
boxes = keras_cv.bounding_box.convert_format(
inputs["objects"]["bbox"],
images=image,
source="rel_yxyx",
target=bounding_box_format,
)
bounding_boxes = {
"classes": tf.cast(inputs["objects"]["label"], dtype=tf.float32),
"boxes": tf.cast(boxes, dtype=tf.float32),
}
return {
"images": tf.cast(image, tf.float32),
"bounding_boxes": bounding_boxes,
}
train_ds = train_ds.map(
lambda inputs: unpackage_tfds_inputs(inputs, bounding_box_format="xywh"),
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.map(
lambda inputs: unpackage_tfds_inputs(inputs, bounding_box_format="xywh"),
num_parallel_calls=tf.data.AUTOTUNE,
)
augmenter = keras.Sequential(
layers=[
keras_cv.layers.RandomFlip(
mode="horizontal", bounding_box_format="xywh"
),
keras_cv.layers.JitteredResize(
target_size=(640, 640),
scale_factor=(0.8, 1.25),
bounding_box_format="xywh",
),
]
)
rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255),
augmentations_per_image=2,
magnitude=0.2,
rate=0.5,
magnitude_stddev=0.1,
geometric=False,
)
def apply_rand_augment(inputs):
inputs["images"] = rand_augment(inputs["images"])
return inputs
train_ds = train_ds.map(apply_rand_augment)
train_ds = train_ds.apply(
tf.data.experimental.dense_to_ragged_batch(BATCH_SIZE)
)
train_ds = train_ds.map(augmenter, num_parallel_calls=tf.data.AUTOTUNE)
def pad_fn(inputs):
inputs["bounding_boxes"] = keras_cv.bounding_box.to_dense(
inputs["bounding_boxes"], max_boxes=32
)
return inputs
train_ds = train_ds.shuffle(8 * strategy.num_replicas_in_sync)
train_ds = train_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.prefetch(tf.data.AUTOTUNE)
eval_resizing = keras_cv.layers.Resizing(
640, 640, pad_to_aspect_ratio=True, bounding_box_format="xywh"
)
eval_ds = eval_ds.map(
eval_resizing,
num_parallel_calls=tf.data.AUTOTUNE,
)
eval_ds = eval_ds.apply(tf.data.experimental.dense_to_ragged_batch(BATCH_SIZE))
eval_ds = eval_ds.map(pad_fn, num_parallel_calls=tf.data.AUTOTUNE)
eval_ds = eval_ds.prefetch(tf.data.AUTOTUNE)
"""
## Model creation
We'll use the KerasCV API to construct a RetinaNet model. In this tutorial we
use a pretrained ResNet50 backbone using weights. In order to perform
fine-tuning, we freeze the backbone before training. When
`include_rescaling=True` is set, inputs to the model are expected to be in the
range `[0, 255]`.
"""
with strategy.scope():
model = keras_cv.models.RetinaNet(
# number of classes to be used in box classification
num_classes=20,
# For more info on supported bounding box formats, visit
# https://keras.io/api/keras_cv/bounding_box/
bounding_box_format="xywh",
backbone=keras_cv.models.ResNet50Backbone.from_preset(
"resnet50_imagenet"
),
)
lr_decay = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[12000 * 16, 16000 * 16],
values=[BASE_LR, 0.1 * BASE_LR, 0.01 * BASE_LR],
)
optimizer = tf.keras.optimizers.SGD(
learning_rate=lr_decay, momentum=0.9, global_clipnorm=10.0
)
model.prediction_decoder = keras_cv.layers.MultiClassNonMaxSuppression(
bounding_box_format="xywh", confidence_threshold=0.5, from_logits=True
)
model.compile(
classification_loss="focal",
box_loss="smoothl1",
optimizer=optimizer,
metrics=[],
)
class EvaluateCOCOMetricsCallback(keras.callbacks.Callback):
def __init__(self, data):
super().__init__()
self.data = data
self.metrics = keras_cv.metrics.BoxCOCOMetrics(
bounding_box_format="xywh", evaluate_freq=1e9
)
def on_epoch_end(self, epoch, logs):
self.metrics.reset_state()
for batch in tqdm.tqdm(self.data):
images, y_true = batch[0], batch[1]
y_pred = self.model.predict(images, verbose=0)
self.metrics.update_state(y_true, y_pred)
metrics = self.metrics.result(force=True)
logs.update(metrics)
return logs
callbacks = [
keras.callbacks.ReduceLROnPlateau(patience=5),
keras.callbacks.EarlyStopping(patience=10),
keras.callbacks.ModelCheckpoint(FLAGS.weights_name, save_weights_only=True),
# Temporarily need PyCOCOCallback to verify
# a 1:1 comparison with the PyMetrics version.
# Currently, results do not match. I have a feeling this is due
# to how we are creating the boxes in `BoxCOCOMetrics`
PyCOCOCallback(eval_ds, bounding_box_format="xywh"),
keras.callbacks.TensorBoard(log_dir=FLAGS.tensorboard_path),
]
history = model.fit(
train_ds,
validation_data=eval_ds,
epochs=FLAGS.epochs,
callbacks=callbacks,
)
| keras-cv/examples/training/object_detection/pascal_voc/retinanet.py/0 | {
"file_path": "keras-cv/examples/training/object_detection/pascal_voc/retinanet.py",
"repo_id": "keras-cv",
"token_count": 3107
} | 4 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import config
if config.keras_3():
from keras.src.backend.tensorflow import * # noqa: F403, F401
from keras.src.backend.tensorflow import ( # noqa: F403, F401
convert_to_numpy,
)
from keras.src.backend.tensorflow.core import * # noqa: F403, F401
from keras.src.backend.tensorflow.math import * # noqa: F403, F401
from keras.src.backend.tensorflow.nn import * # noqa: F403, F401
from keras.src.backend.tensorflow.numpy import * # noqa: F403, F401
else:
# isort: off
from keras_core.src.backend.tensorflow import * # noqa: F403, F401
from keras_core.src.backend.tensorflow import ( # noqa: F403, F401
convert_to_numpy,
)
from keras_core.src.backend.tensorflow.core import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.math import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.nn import * # noqa: F403, F401
from keras_core.src.backend.tensorflow.numpy import * # noqa: F403, F401, E501
# Some TF APIs where the numpy API doesn't support raggeds that we need
from tensorflow import broadcast_to # noqa: F403, F401
from tensorflow import concat as concatenate # noqa: F403, F401
from tensorflow import repeat # noqa: F403, F401
from tensorflow import reshape # noqa: F403, F401
from tensorflow import range as arange # noqa: F403, F401
from tensorflow import reduce_all as all # noqa: F403, F401
from tensorflow import reduce_max as max # noqa: F403, F401
from tensorflow import split # noqa: F403, F401
import numpy as np
import tensorflow as tf
def smart_resize(x, size, interpolation="bilinear"):
"""Resize images to a target size without aspect ratio distortion.
Copied from `tf_keras` for Keras 3 and for use in `tf.data` pipeline.
"""
if len(size) != 2:
raise ValueError(
f"Expected `size` to be a tuple of 2 integers, but got: {size}."
)
img = tf.convert_to_tensor(x)
if img.shape.rank is not None:
if img.shape.rank < 3 or img.shape.rank > 4:
raise ValueError(
"Expected an image array with shape `(height, width, "
"channels)`, or `(batch_size, height, width, channels)`, but "
f"got input with incorrect rank, of shape {img.shape}."
)
shape = tf.shape(img)
height, width = shape[-3], shape[-2]
target_height, target_width = size
if img.shape.rank is not None:
static_num_channels = img.shape[-1]
else:
static_num_channels = None
crop_height = tf.cast(
tf.cast(width * target_height, "float32") / target_width, "int32"
)
crop_width = tf.cast(
tf.cast(height * target_width, "float32") / target_height, "int32"
)
# Set back to input height / width if crop_height / crop_width is not
# smaller.
crop_height = tf.minimum(height, crop_height)
crop_width = tf.minimum(width, crop_width)
crop_box_hstart = tf.cast(
tf.cast(height - crop_height, "float32") / 2, "int32"
)
crop_box_wstart = tf.cast(
tf.cast(width - crop_width, "float32") / 2, "int32"
)
if img.shape.rank == 4:
crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
else:
crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
crop_box_size = tf.stack([crop_height, crop_width, -1])
img = tf.slice(img, crop_box_start, crop_box_size)
img = tf.image.resize(images=img, size=size, method=interpolation)
# Apparent bug in resize_images_v2 may cause shape to be lost
if img.shape.rank is not None:
if img.shape.rank == 4:
img.set_shape((None, None, None, static_num_channels))
if img.shape.rank == 3:
img.set_shape((None, None, static_num_channels))
if isinstance(x, np.ndarray):
return img.numpy()
return img
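

# Illustrative sketch (not part of the original module): resizing a batch of
# non-square images with `smart_resize`. The images are center-cropped to the
# target aspect ratio before resizing, so no distortion is introduced.
def _example_smart_resize():
    images = tf.random.uniform((8, 300, 400, 3))
    resized = smart_resize(images, (224, 224))
    return resized  # shape: (8, 224, 224, 3)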
| keras-cv/keras_cv/backend/tf_ops.py/0 | {
"file_path": "keras-cv/keras_cv/backend/tf_ops.py",
"repo_id": "keras-cv",
"token_count": 1832
} | 5 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with bounding boxes."""
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.bounding_box.formats import XYWH
@keras_cv_export("keras_cv.bounding_box.is_relative")
def is_relative(bounding_box_format):
"""A util to check if a bounding box format uses relative coordinates"""
if (
bounding_box_format.lower()
not in bounding_box.converters.TO_XYXY_CONVERTERS
):
raise ValueError(
"`is_relative()` received an unsupported format for the argument "
f"`bounding_box_format`. `bounding_box_format` should be one of "
f"{bounding_box.converters.TO_XYXY_CONVERTERS.keys()}. "
f"Got bounding_box_format={bounding_box_format}"
)
return bounding_box_format.startswith("rel")
@keras_cv_export("keras_cv.bounding_box.as_relative")
def as_relative(bounding_box_format):
"""A util to get the relative equivalent of a provided bounding box format.
If the specified format is already a relative format,
it will be returned unchanged.
"""
if not is_relative(bounding_box_format):
return "rel_" + bounding_box_format
return bounding_box_format
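

# Illustrative examples (not part of the module API): `is_relative` simply
# checks for the "rel_" prefix of a supported format name, and `as_relative`
# adds the prefix when it is missing.
#   is_relative("xyxy")      -> False
#   is_relative("rel_xyxy")  -> True
#   as_relative("xyxy")      -> "rel_xyxy"
#   as_relative("rel_xyxy")  -> "rel_xyxy"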
def _relative_area(boxes, bounding_box_format):
boxes = bounding_box.convert_format(
boxes,
source=bounding_box_format,
target="rel_xywh",
)
widths = boxes[..., XYWH.WIDTH]
heights = boxes[..., XYWH.HEIGHT]
# handle corner case where shear performs a full inversion.
return ops.where(
ops.logical_and(widths > 0, heights > 0), widths * heights, 0.0
)
@keras_cv_export("keras_cv.bounding_box.clip_to_image")
def clip_to_image(
bounding_boxes, bounding_box_format, images=None, image_shape=None
):
"""clips bounding boxes to image boundaries.
`clip_to_image()` clips bounding boxes that have coordinates out of bounds
of an image down to the boundaries of the image. This is done by converting
the bounding box to relative formats, then clipping them to the `[0, 1]`
range. Additionally, bounding boxes that end up with a zero area have their
class ID set to -1, indicating that there is no object present in them.
Args:
bounding_boxes: bounding box tensor to clip.
bounding_box_format: the KerasCV bounding box format the bounding boxes
are in.
images: list of images to clip the bounding boxes to.
image_shape: the shape of the images to clip the bounding boxes to.
"""
boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"]
boxes = bounding_box.convert_format(
boxes,
source=bounding_box_format,
target="rel_xyxy",
images=images,
image_shape=image_shape,
)
boxes, classes, images, squeeze = _format_inputs(boxes, classes, images)
x1, y1, x2, y2 = ops.split(boxes, 4, axis=-1)
clipped_bounding_boxes = ops.concatenate(
[
ops.clip(x1, 0, 1),
ops.clip(y1, 0, 1),
ops.clip(x2, 0, 1),
ops.clip(y2, 0, 1),
],
axis=-1,
)
areas = _relative_area(
clipped_bounding_boxes, bounding_box_format="rel_xyxy"
)
clipped_bounding_boxes = bounding_box.convert_format(
clipped_bounding_boxes,
source="rel_xyxy",
target=bounding_box_format,
images=images,
image_shape=image_shape,
)
clipped_bounding_boxes = ops.where(
ops.expand_dims(areas > 0.0, axis=-1), clipped_bounding_boxes, -1.0
)
classes = ops.where(areas > 0.0, classes, -1)
nan_indices = ops.any(ops.isnan(clipped_bounding_boxes), axis=-1)
classes = ops.where(nan_indices, -1, classes)
# TODO update dict and return
clipped_bounding_boxes, classes = _format_outputs(
clipped_bounding_boxes, classes, squeeze
)
result = bounding_boxes.copy()
result["boxes"] = clipped_bounding_boxes
result["classes"] = classes
return result
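

# Illustrative sketch (not part of the module API): clipping one out-of-bounds
# "xyxy" box to a hypothetical 100x100 image. The box is truncated at the
# image border and keeps its class id because its clipped area is positive.
def _example_clip_to_image():
    import numpy as np  # local import, for illustration only

    example_boxes = {
        "boxes": np.array([[50.0, 50.0, 150.0, 150.0]], dtype="float32"),
        "classes": np.array([0.0], dtype="float32"),
    }
    clipped = clip_to_image(
        example_boxes, bounding_box_format="xyxy", image_shape=(100, 100, 3)
    )
    # boxes approximately [[50., 50., 100., 100.]], classes [0.]
    return clipped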
# TODO (tanzhenyu): merge with clip_to_image
def _clip_boxes(boxes, box_format, image_shape):
"""Clip boxes to the boundaries of the image shape"""
if boxes.shape[-1] != 4:
raise ValueError(
"boxes.shape[-1] is {:d}, but must be 4.".format(boxes.shape[-1])
)
if isinstance(image_shape, list) or isinstance(image_shape, tuple):
height, width, _ = image_shape
max_length = [height, width, height, width]
else:
image_shape = ops.cast(image_shape, dtype=boxes.dtype)
height = image_shape[0]
width = image_shape[1]
max_length = ops.stack([height, width, height, width], axis=-1)
clipped_boxes = ops.maximum(ops.minimum(boxes, max_length), 0.0)
return clipped_boxes
def _format_inputs(boxes, classes, images):
boxes_rank = len(boxes.shape)
if boxes_rank > 3:
raise ValueError(
"Expected len(boxes.shape)=2, or len(boxes.shape)=3, got "
f"len(boxes.shape)={boxes_rank}"
)
boxes_includes_batch = boxes_rank == 3
# Determine if images needs an expand_dims() call
if images is not None:
images_rank = len(images.shape)
if images_rank > 4:
raise ValueError(
"Expected len(images.shape)=2, or len(images.shape)=3, got "
f"len(images.shape)={images_rank}"
)
images_include_batch = images_rank == 4
if boxes_includes_batch != images_include_batch:
raise ValueError(
"clip_to_image() expects both boxes and images to be batched, "
"or both boxes and images to be unbatched. Received "
f"len(boxes.shape)={boxes_rank}, "
f"len(images.shape)={images_rank}. Expected either "
"len(boxes.shape)=2 AND len(images.shape)=3, or "
"len(boxes.shape)=3 AND len(images.shape)=4."
)
if not images_include_batch:
images = ops.expand_dims(images, axis=0)
if not boxes_includes_batch:
return (
ops.expand_dims(boxes, axis=0),
ops.expand_dims(classes, axis=0),
images,
True,
)
return boxes, classes, images, False
def _format_outputs(boxes, classes, squeeze):
if squeeze:
return ops.squeeze(boxes, axis=0), ops.squeeze(classes, axis=0)
return boxes, classes
| keras-cv/keras_cv/bounding_box/utils.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/utils.py",
"repo_id": "keras-cv",
"token_count": 2894
} | 6 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.NormalFactorSampler")
class NormalFactorSampler(FactorSampler):
"""NormalFactorSampler samples factors from a normal distribution.
This is useful in cases where a user wants to always ensure that an
augmentation layer performs augmentations of the same strength.
Args:
mean: mean value for the distribution.
stddev: standard deviation of the distribution.
min_value: values below min_value are clipped to min_value.
max_value: values above max_value are clipped to max_value.
Usage:
```python
    factor = keras_cv.core.NormalFactorSampler(
        mean=0.5,
        stddev=0.1,
        min_value=0,
        max_value=1
    )
    random_sharpness = keras_cv.layers.RandomSharpness(factor=factor)
    # random_sharpness will now sample factors normally around 0.5, clipped to
    # a lower bound of 0 and an upper bound of 1.
```
"""
def __init__(self, mean, stddev, min_value, max_value, seed=None):
self.mean = mean
self.stddev = stddev
self.min_value = min_value
self.max_value = max_value
self.seed = seed
def __call__(self, shape=(), dtype="float32"):
return tf.clip_by_value(
tf.random.normal(
shape=shape,
mean=self.mean,
stddev=self.stddev,
seed=self.seed,
dtype=dtype,
),
self.min_value,
self.max_value,
)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"min_value": self.min_value,
"max_value": self.max_value,
"seed": self.seed,
}
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/core/factor_sampler/normal_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/normal_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 1032
} | 7 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Mapping
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.backend import assert_tf_keras
from keras_cv.bounding_box import iou
from keras_cv.layers.object_detection import box_matcher
from keras_cv.layers.object_detection import sampling
from keras_cv.utils import target_gather
@keras.utils.register_keras_serializable(package="keras_cv")
class _RpnLabelEncoder(keras.layers.Layer):
"""Transforms the raw labels into training targets for region proposal
network (RPN).
# TODO(tanzhenyu): consider unifying with _ROISampler.
This is different from _ROISampler for a couple of reasons:
1) This deals with unbatched input, dict of anchors and potentially ragged
labels.
2) This deals with ground truth boxes, while _ROISampler deals with padded
ground truth boxes with value -1 and padded ground truth classes with
value -1.
3) this returns positive class target as 1, while _ROISampler returns
positive class target as-is. (All negative class target are 0)
The final classification loss will use one hot and #num_fg_classes + 1
4) this returns #num_anchors dense targets, while _ROISampler returns
#num_sampled_rois dense targets.
5) this returns all positive box targets, while _ROISampler still samples
positive box targets, while all negative box targets are also ignored
in regression loss.
Args:
anchor_format: The format of bounding boxes for anchors to generate. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more details on supported bounding box
formats.
ground_truth_box_format: The format of bounding boxes for ground truth
boxes to generate.
positive_threshold: the float threshold to set an anchor to positive match
to gt box. Values above it are positive matches.
negative_threshold: the float threshold to set an anchor to negative match
to gt box. Values below it are negative matches.
samples_per_image: for each image, the number of positive and negative
samples to generate.
positive_fraction: the fraction of positive samples to the total samples.
""" # noqa: E501
def __init__(
self,
anchor_format,
ground_truth_box_format,
positive_threshold,
negative_threshold,
samples_per_image,
positive_fraction,
box_variance=[0.1, 0.1, 0.2, 0.2],
**kwargs,
):
assert_tf_keras("keras_cv.layers._RpnLabelEncoder")
super().__init__(**kwargs)
self.anchor_format = anchor_format
self.ground_truth_box_format = ground_truth_box_format
self.positive_threshold = positive_threshold
self.negative_threshold = negative_threshold
self.samples_per_image = samples_per_image
self.positive_fraction = positive_fraction
self.box_matcher = box_matcher.BoxMatcher(
thresholds=[negative_threshold, positive_threshold],
match_values=[-1, -2, 1],
force_match_for_each_col=False,
)
self.box_variance = box_variance
self.built = True
self._positives = keras.metrics.Mean(name="percent_boxes_matched")
def call(
self,
anchors_dict: Mapping[str, tf.Tensor],
gt_boxes: tf.Tensor,
gt_classes: tf.Tensor,
):
"""
Args:
anchors_dict: dict of [num_anchors, 4] or [batch_size, num_anchors, 4]
float Tensor for each level.
gt_boxes: [num_gt, 4] or [batch_size, num_anchors] float Tensor.
gt_classes: [num_gt, 1] float or integer Tensor.
Returns:
box_targets: dict of [num_anchors, 4] or for each level.
box_weights: dict of [num_anchors, 1] for each level.
class_targets: dict of [num_anchors, 1] for each level.
class_weights: dict of [num_anchors, 1] for each level.
"""
pack = False
anchors = anchors_dict
if isinstance(anchors, dict):
pack = True
anchors = tf.concat(tf.nest.flatten(anchors), axis=0)
anchors = bounding_box.convert_format(
anchors, source=self.anchor_format, target="yxyx"
)
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.ground_truth_box_format, target="yxyx"
)
# [num_anchors, num_gt] or [batch_size, num_anchors, num_gt]
similarity_mat = iou.compute_iou(
anchors, gt_boxes, bounding_box_format="yxyx"
)
# [num_anchors] or [batch_size, num_anchors]
matched_gt_indices, matched_vals = self.box_matcher(similarity_mat)
# [num_anchors] or [batch_size, num_anchors]
positive_matches = tf.math.equal(matched_vals, 1)
# currently SyncOnReadVariable does not support `assign_add` in
# cross-replica.
# self._positives.update_state(
# tf.reduce_sum(tf.cast(positive_matches, tf.float32), axis=-1)
# )
negative_matches = tf.math.equal(matched_vals, -1)
# [num_anchors, 4] or [batch_size, num_anchors, 4]
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_indices
)
# [num_anchors, 4] or [batch_size, num_anchors, 4], used as `y_true` for
# regression loss
encoded_box_targets = bounding_box._encode_box_to_deltas(
anchors,
matched_gt_boxes,
anchor_format="yxyx",
box_format="yxyx",
variance=self.box_variance,
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
box_sample_weights = tf.cast(
positive_matches[..., tf.newaxis], gt_boxes.dtype
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
positive_mask = tf.expand_dims(positive_matches, axis=-1)
# set all negative and ignored matches to 0, and all positive matches to
# 1 [num_anchors, 1] or [batch_size, num_anchors, 1]
positive_classes = tf.ones_like(positive_mask, dtype=gt_classes.dtype)
negative_classes = tf.zeros_like(positive_mask, dtype=gt_classes.dtype)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
class_targets = tf.where(
positive_mask, positive_classes, negative_classes
)
# [num_anchors] or [batch_size, num_anchors]
sampled_indicators = sampling.balanced_sample(
positive_matches,
negative_matches,
self.samples_per_image,
self.positive_fraction,
)
# [num_anchors, 1] or [batch_size, num_anchors, 1]
class_sample_weights = tf.cast(
sampled_indicators[..., tf.newaxis], gt_classes.dtype
)
if pack:
encoded_box_targets = self.unpack_targets(
encoded_box_targets, anchors_dict
)
box_sample_weights = self.unpack_targets(
box_sample_weights, anchors_dict
)
class_targets = self.unpack_targets(class_targets, anchors_dict)
class_sample_weights = self.unpack_targets(
class_sample_weights, anchors_dict
)
return (
encoded_box_targets,
box_sample_weights,
class_targets,
class_sample_weights,
)
def unpack_targets(self, targets, anchors_dict):
target_shape = len(targets.get_shape().as_list())
if target_shape != 2 and target_shape != 3:
raise ValueError(
"unpacking targets must be rank 2 or rank 3, got "
f"{target_shape}"
)
unpacked_targets = {}
count = 0
for level, anchors in anchors_dict.items():
num_anchors_lvl = anchors.get_shape().as_list()[0]
if target_shape == 2:
unpacked_targets[level] = targets[
count : count + num_anchors_lvl, ...
]
else:
unpacked_targets[level] = targets[
:, count : count + num_anchors_lvl, ...
]
count += num_anchors_lvl
return unpacked_targets
def get_config(self):
config = {
"anchor_format": self.anchor_format,
"ground_truth_box_format": self.ground_truth_box_format,
"positive_threshold": self.positive_threshold,
"negative_threshold": self.negative_threshold,
"samples_per_image": self.samples_per_image,
"positive_fraction": self.positive_fraction,
"box_variance": self.box_variance,
}
return config
| keras-cv/keras_cv/layers/object_detection/rpn_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/rpn_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 4127
} | 8 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class AugMixTest(TestCase):
def test_return_shapes(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
def test_in_single_image_and_mask(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.cast(
tf.ones((512, 512, 3)),
dtype=tf.float32,
)
xs = layer(xs)
ys_segmentation_masks = tf.cast(
tf.ones((512, 512, 3)),
dtype=tf.float32,
)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (512, 512, 3))
# greyscale
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
xs = layer(xs)
ys_segmentation_masks = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (512, 512, 1))
def test_non_square_images_and_masks(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 256, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 256, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 256, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 256, 512, 3))
# greyscale
xs = tf.ones((2, 256, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 256, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 256, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 256, 512, 1))
def test_single_input_args(self):
layer = preprocessing.AugMix([0, 255])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
def test_many_augmentations(self):
layer = preprocessing.AugMix([0, 255], chain_depth=[25, 26])
# RGB
xs = tf.ones((2, 512, 512, 3))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
# greyscale
xs = tf.ones((2, 512, 512, 1))
xs = layer(xs)
ys_segmentation_masks = tf.ones((2, 512, 512, 1))
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 1))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 1))
| keras-cv/keras_cv/layers/preprocessing/aug_mix_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/aug_mix_test.py",
"repo_id": "keras-cv",
"token_count": 2242
} | 9 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
import keras_cv
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.tests.test_case import TestCase
class GridMaskTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
layer = GridMask(ratio_factor=0.1, rotation_factor=(-0.2, 0.3))
xs = layer(xs, training=True)
self.assertEqual(xs.shape, (2, 512, 512, 3))
def test_gridmask_call_results_one_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 1)), 2 * tf.ones((40, 40, 1))],
axis=0,
),
dtype=tf.float32,
)
fill_value = 0.0
layer = GridMask(
ratio_factor=0.3,
rotation_factor=(0.2, 0.3),
fill_mode="constant",
fill_value=fill_value,
)
xs = layer(xs, training=True)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_non_square_image(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((1024, 512, 1)), tf.ones((1024, 512, 1))],
axis=0,
),
dtype=tf.float32,
)
fill_value = 100.0
layer = GridMask(
ratio_factor=0.6,
rotation_factor=0.3,
fill_mode="constant",
fill_value=fill_value,
)
xs = layer(xs, training=True)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
dtype=tf.float32,
)
fill_value = 255.0
layer = GridMask(
ratio_factor=keras_cv.ConstantFactorSampler(0.5),
rotation_factor=0.5,
fill_mode="constant",
fill_value=fill_value,
)
@tf.function
def augment(x):
return layer(x, training=True)
xs = augment(xs)
# Some pixels should be replaced with fill_value
self.assertTrue(
np.any(ops.convert_to_numpy(xs[0]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(
np.any(ops.convert_to_numpy(xs[1]) == float(fill_value))
)
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
layer = GridMask(
ratio_factor=(0.5, 0.5), fill_mode="constant", fill_value=0.0
)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 1.0))
| keras-cv/keras_cv/layers/preprocessing/grid_mask_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/grid_mask_test.py",
"repo_id": "keras-cv",
"token_count": 2110
} | 10 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers import preprocessing
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RandomAugmentationPipeline")
class RandomAugmentationPipeline(BaseImageAugmentationLayer):
"""RandomAugmentationPipeline constructs a pipeline based on provided
arguments.
The implemented policy does the following: for each input provided in
`call`(), the policy first inputs a random number, if the number is < rate,
the policy then selects a random layer from the provided list of `layers`.
It then calls the `layer()` on the inputs. This is done
`augmentations_per_image` times.
This layer can be used to create custom policies resembling `RandAugment` or
`AutoAugment`.
Usage:
```python
# construct a list of layers
layers = keras_cv.layers.RandAugment.get_standard_policy(
value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
)
    # slice out some layers you don't want for whatever reason
    layers = layers[:4]
layers = layers + [keras_cv.layers.GridMask()]
# create the pipeline.
pipeline = keras_cv.layers.RandomAugmentationPipeline(
layers=layers, augmentations_per_image=3
)
augmented_images = pipeline(images)
```
Args:
        layers: a list of `keras.Layers`. These are randomly sampled during
            augmentation to augment the inputs passed in `call()`. The layers
passed should subclass `BaseImageAugmentationLayer`. Passing
`layers=[]` would result in a no-op.
augmentations_per_image: the number of layers to apply to each inputs in
the `call()` method.
rate: the rate at which to apply each augmentation. This is applied on a
            per-augmentation basis, so if `augmentations_per_image=3` and
            `rate=0.5`, the probability an image receives no augmentations is
0.5^3, or 0.5*0.5*0.5.
auto_vectorize: whether to use `tf.vectorized_map` or `tf.map_fn` to
apply the augmentations. This offers a significant performance
boost, but can only be used if all the layers provided to the
`layers` argument support auto vectorization.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
layers,
augmentations_per_image,
rate=1.0,
auto_vectorize=False,
seed=None,
**kwargs,
):
super().__init__(**kwargs, seed=seed)
self.augmentations_per_image = augmentations_per_image
self.rate = rate
self.layers = list(layers)
self.auto_vectorize = auto_vectorize
self.seed = seed
self._random_choice = preprocessing.RandomChoice(
layers=layers, auto_vectorize=auto_vectorize, seed=seed
)
def _augment(self, inputs):
if self.layers == []:
return inputs
result = inputs
for _ in range(self.augmentations_per_image):
skip_augment = self._random_generator.uniform(
shape=(), minval=0.0, maxval=1.0, dtype=tf.float32
)
result = tf.cond(
skip_augment > self.rate,
lambda: result,
lambda: self._random_choice(result),
)
return result
def get_config(self):
config = super().get_config()
config.update(
{
"augmentations_per_image": self.augmentations_per_image,
"auto_vectorize": self.auto_vectorize,
"rate": self.rate,
"layers": self.layers,
"seed": self.seed,
}
)
return config
@classmethod
def from_config(cls, config):
layers = config.pop("layers", None)
if layers:
if isinstance(layers[0], dict):
layers = keras.utils.deserialize_keras_object(layers)
config["layers"] = layers
return cls(**config)
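

# Illustrative sketch (not part of the original module): a two-layer pipeline
# that applies, on average, one of the two augmentations per image. The layer
# choices are arbitrary examples.
def _example_random_augmentation_pipeline(images):
    pipeline = RandomAugmentationPipeline(
        layers=[
            preprocessing.Grayscale(output_channels=3),
            preprocessing.GridMask(),
        ],
        augmentations_per_image=2,
        rate=0.5,
    )
    return pipeline(images)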
| keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_augmentation_pipeline.py",
"repo_id": "keras-cv",
"token_count": 1946
} | 11 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomCropAndResizeTest(TestCase):
height, width = 300, 300
batch_size = 4
target_size = (224, 224)
seed = 42
def test_train_augments_image(self):
# Checks if original and augmented images are different
input_image_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
layer = preprocessing.RandomCropAndResize(
target_size=self.target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
seed=self.seed,
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertNotAllClose(output, input_image_resized)
def test_grayscale(self):
input_image_shape = (self.batch_size, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape)
layer = preprocessing.RandomCropAndResize(
target_size=self.target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertAllEqual(output.shape, (4, 224, 224, 1))
self.assertNotAllClose(output, input_image_resized)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Length not equal to 2", [1, 2, 3]),
("Members not int", (2.3, 4.5)),
("Single integer", 5),
)
def test_target_size_errors(self, target_size):
with self.assertRaisesRegex(
ValueError,
"`target_size` must be tuple of two integers. "
"Received target_size=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=target_size,
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Single integer", 5),
("Single float", 5.0),
)
def test_aspect_ratio_factor_errors(self, aspect_ratio_factor):
with self.assertRaisesRegex(
ValueError,
"`aspect_ratio_factor` must be tuple of two positive floats or "
"keras_cv.core.FactorSampler instance. "
"Received aspect_ratio_factor=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=(224, 224),
aspect_ratio_factor=aspect_ratio_factor,
crop_area_factor=(0.8, 1.0),
)
@parameterized.named_parameters(
("Not tuple or list", dict()),
("Single integer", 5),
("Single float", 5.0),
)
def test_crop_area_factor_errors(self, crop_area_factor):
with self.assertRaisesRegex(
ValueError,
"`crop_area_factor` must be tuple of two positive floats less than "
"or equal to 1 or keras_cv.core.FactorSampler instance. "
"Received crop_area_factor=(.*)",
):
_ = preprocessing.RandomCropAndResize(
target_size=(224, 224),
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=crop_area_factor,
)
def test_augment_sparse_segmentation_mask(self):
num_classes = 8
input_image_shape = (1, self.height, self.width, 3)
mask_shape = (1, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
mask = tf.constant(
np.random.randint(2, size=mask_shape) * (num_classes - 1)
)
inputs = {"images": image, "segmentation_masks": mask}
# Crop-only to exactly 1/2 of the size
layer = preprocessing.RandomCropAndResize(
target_size=(150, 150),
aspect_ratio_factor=(1, 1),
crop_area_factor=(1, 1),
seed=self.seed,
)
input_mask_resized = tf.image.crop_and_resize(
mask, [[0, 0, 1, 1]], [0], (150, 150), "nearest"
)
output = layer(inputs, training=True)
self.assertAllClose(output["segmentation_masks"], input_mask_resized)
# Crop to an arbitrary size and make sure we don't do bad interpolation
layer = preprocessing.RandomCropAndResize(
target_size=(233, 233),
aspect_ratio_factor=(3 / 4, 4 / 3),
crop_area_factor=(0.8, 1.0),
seed=self.seed,
)
output = layer(inputs, training=True)
self.assertAllInSet(
ops.convert_to_numpy(output["segmentation_masks"]), [0, 7]
)
def test_augment_one_hot_segmentation_mask(self):
num_classes = 8
input_image_shape = (1, self.height, self.width, 3)
mask_shape = (1, self.height, self.width, 1)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
mask = tf.one_hot(
tf.squeeze(
np.random.randint(2, size=mask_shape) * (num_classes - 1),
axis=-1,
),
num_classes,
)
inputs = {"images": image, "segmentation_masks": mask}
# Crop-only to exactly 1/2 of the size
layer = preprocessing.RandomCropAndResize(
target_size=(150, 150),
aspect_ratio_factor=(1, 1),
crop_area_factor=(1, 1),
seed=self.seed,
)
input_mask_resized = tf.image.crop_and_resize(
mask, [[0, 0, 1, 1]], [0], (150, 150), "nearest"
)
output = layer(inputs, training=True)
self.assertAllClose(output["segmentation_masks"], input_mask_resized)
def test_augment_bounding_box_single(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]]),
"classes": tf.convert_to_tensor([0]),
}
input = {"images": image, "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(10, 10),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
expected_output = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_batched_input(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
}
input = {"images": [image, image], "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(18, 18),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
expected_output = {
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
}
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
boxes = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant([[0, 0], [0]]),
}
input = {"images": image, "bounding_boxes": boxes}
layer = preprocessing.RandomCropAndResize(
target_size=(18, 18),
crop_area_factor=(0.5**2, 0.5**2),
aspect_ratio_factor=(1.0, 1.0),
bounding_box_format="rel_xyxy",
)
output = layer(input, training=True)
# the result boxes will still have the entire image in them
expected_output = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant([[0, 0], [0]]),
}
expected_output = bounding_box.to_dense(expected_output)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
self.assertAllClose(
expected_output["boxes"], output["bounding_boxes"]["boxes"]
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
| keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_crop_and_resize_test.py",
"repo_id": "keras-cv",
"token_count": 5074
} | 12 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomSharpness")
class RandomSharpness(VectorizedBaseImageAugmentationLayer):
"""Randomly performs the sharpness operation on given images.
The sharpness operation first performs a blur operation, then blends between
the original image and the blurred image. This operation makes the edges of
an image less sharp than they were in the original image.
References:
- [PIL](https://pillow.readthedocs.io/en/stable/reference/ImageEnhance.html)
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image sharpness is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the sharpened result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the sharpened image. Values should be
between `0.0` and `1.0`. If a tuple is used, a `factor` is sampled
between the two values for every image augmented. If a single float
is used, a value between `0.0` and the passed float is sampled. In
order to ensure the value is always the same, please pass a tuple
with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high].
This is typically either `[0, 1]` or `[0, 255]` depending
on how your preprocessing pipeline is set up.
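    Usage (a minimal sketch; the shapes and the factor value below are
    illustrative):
    ```python
    import tensorflow as tf
    import keras_cv
    images = tf.random.uniform(shape=(2, 64, 64, 3), maxval=255.0)
    sharpness = keras_cv.layers.RandomSharpness(
        factor=0.5, value_range=(0, 255)
    )
    augmented_images = sharpness(images)
    ```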
""" # noqa: E501
def __init__(
self,
factor,
value_range,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.value_range = value_range
self.factor = preprocessing.parse_factor(factor)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing.transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
original_images = images
# [1 1 1]
# [1 5 1]
# [1 1 1]
# all divided by 13 is the default 3x3 gaussian smoothing kernel.
# Correlating or Convolving with this filter is equivalent to performing
# a gaussian blur.
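        # The kernel weights sum to 13 (eight 1s plus the center 5), so the
        # division by 13 keeps the overall image brightness unchanged.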
kernel = (
tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=self.compute_dtype,
shape=[3, 3, 1, 1],
)
/ 13.0
)
# Tile across channel dimension.
channels = tf.shape(images)[-1]
kernel = tf.tile(kernel, [1, 1, channels, 1])
strides = [1, 1, 1, 1]
smoothed_image = tf.nn.depthwise_conv2d(
images, kernel, strides, padding="VALID", dilations=[1, 1]
)
smoothed_image = tf.clip_by_value(smoothed_image, 0.0, 255.0)
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(smoothed_image)
padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])
padded_smoothed_image = tf.pad(
smoothed_image, [[0, 0], [1, 1], [1, 1], [0, 0]]
)
result = tf.where(
tf.equal(padded_mask, 1), padded_smoothed_image, original_images
)
# Blend the final result.
result = preprocessing.blend(original_images, result, transformations)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_ragged_image(self, image, transformation, **kwargs):
images = tf.expand_dims(image, axis=0)
new_transformation = tf.expand_dims(transformation, axis=0)
output = self.augment_images(images, new_transformation)
return tf.squeeze(output, axis=0)
def get_config(self):
config = super().get_config()
config.update(
{
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_sharpness.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_sharpness.py",
"repo_id": "keras-cv",
"token_count": 2411
} | 13 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tree
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import config
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import scope
from keras_cv.utils import preprocessing
H_AXIS = -3
W_AXIS = -2
IMAGES = "images"
LABELS = "labels"
TARGETS = "targets"
BOUNDING_BOXES = "bounding_boxes"
KEYPOINTS = "keypoints"
SEGMENTATION_MASKS = "segmentation_masks"
IS_DICT = "is_dict"
BATCHED = "batched"
USE_TARGETS = "use_targets"
@keras_cv_export("keras_cv.layers.VectorizedBaseImageAugmentationLayer")
class VectorizedBaseImageAugmentationLayer(keras.layers.Layer):
"""Abstract base layer for vectorized image augmentation.
    This layer contains base functionalities for preprocessing layers which
    augment image-related data, e.g. images, labels, and bounding boxes. The
    subclasses help avoid common mistakes and reduce code duplication.
    This layer requires you to implement one method: `augment_images()`, which
    augments a batch of images during training. There are a few additional
    methods that you can implement for added functionality on the layer:
`augment_labels()`, which handles label augmentation if the layer supports
that.
`augment_bounding_boxes()`, which handles the bounding box augmentation, if
the layer supports that.
`get_random_transformations()`, which should produce a batch of random
transformation settings. The transformation object, which must be a batched
Tensor or a dictionary where each input is a batched Tensor, will be passed
to `augment_images`, `augment_labels` and `augment_bounding_boxes`, to
coordinate the randomness behavior, eg, in the RandomFlip layer, the image
and bounding_boxes should be changed in the same way.
    The `call()` method supports two formats of inputs:
1. Single image tensor with 3D (HWC) or 4D (NHWC) format.
2. A dict of tensors with stable keys. The supported keys are:
`"images"`, `"labels"` and `"bounding_boxes"` at the moment. We might add
more keys in future when we support more types of augmentation.
    The output of `call()` will be in one of the same two formats, matching
    the structure of the inputs.
The `call()` will unpack the inputs, forward to the correct function, and
pack the output back to the same structure as the inputs.
By default, the dense or ragged status of the output will be preserved.
However, you can override this behavior by setting
`self.force_output_dense_images = True`,
`self.force_output_dense_segmentation_masks = True` in your `__init__()`
method. When enabled, images and segmentation masks will be converted to
    dense tensors by `to_tensor()` if ragged.
```python
class SubclassLayer(VectorizedBaseImageAugmentationLayer):
def __init__(self):
super().__init__()
self.force_output_dense_images = True
self.force_output_dense_segmentation_masks = True
```
    Note that since the randomness is also a common functionality, this layer
    also includes a `tf.random.Generator`, which can be used to produce
    random numbers. The random number generator is stored in the
    `self._random_generator` attribute.
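    A minimal subclass sketch (the layer below is illustrative, not part of
    KerasCV; it shifts image brightness by a per-image random amount):
    ```python
    class RandomBrightnessShift(VectorizedBaseImageAugmentationLayer):
        def get_random_transformation_batch(self, batch_size, **kwargs):
            # One shift per image, broadcastable over (H, W, C).
            return self._random_generator.uniform(
                shape=(batch_size, 1, 1, 1), minval=-0.1, maxval=0.1
            )
        def augment_images(self, images, transformations, **kwargs):
            return images + transformations
        def augment_labels(self, labels, transformations, **kwargs):
            return labels
    ```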
"""
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
if seed:
self._random_generator = tf.random.Generator.from_seed(seed=seed)
else:
self._random_generator = tf.random.get_global_generator()
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
@property
def force_output_dense_images(self):
"""Control whether to force outputting of dense images."""
return getattr(self, "_force_output_dense_images", False)
@force_output_dense_images.setter
def force_output_dense_images(self, force_output_dense_images):
self._force_output_dense_images = force_output_dense_images
@property
def force_output_dense_segmentation_masks(self):
"""Control whether to force outputting of dense segmentation masks."""
return getattr(self, "_force_output_dense_segmentation_masks", False)
@force_output_dense_segmentation_masks.setter
def force_output_dense_segmentation_masks(
self, force_output_dense_segmentation_masks
):
self._force_output_dense_segmentation_masks = (
force_output_dense_segmentation_masks
)
def augment_ragged_image(self, image, transformation, **kwargs):
"""Augment an image from a ragged image batch during training.
This method accepts a single Dense image Tensor, and returns a Dense
image. The resulting images are then stacked back into a ragged image
batch. The behavior of this method should be identical to that of
        `augment_images()`, except that it operates on a single image rather
        than on a batch of images.
Args:
image: a single image from the batch
transformation: a single transformation sampled from
`get_random_transformations()`.
kwargs: all the other call arguments (i.e. bounding_boxes, labels,
etc.).
Returns:
Augmented image.
"""
        raise NotImplementedError(
            "A ragged image batch was passed to layer of type "
            f"`{type(self).__name__}`. This layer does not implement "
            "`augment_ragged_image()`. If this is a `keras_cv` layer, open a "
            "GitHub issue requesting Ragged functionality on the layer "
            f"titled: '`{type(self).__name__}`: ragged image support'. If "
            "this is a custom layer, implement the `augment_ragged_image()` "
            "method."
        )
def compute_ragged_image_signature(self, images):
"""Computes the output image signature for the `augment_image()`
function.
Must be overridden to return tensors with different shapes than the
        input images. By default, returns a `tf.RaggedTensorSpec` matching the
        input image spec.
"""
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
def augment_images(self, images, transformations, **kwargs):
"""Augment a batch of images during training.
Args:
            images: 4D image input tensor to the layer. Forwarded from
                `layer.call()`. This should generally have the shape
                [B, H, W, C].
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 4D tensor, which will be forward to `layer.call()`.
"""
raise NotImplementedError()
def augment_labels(self, labels, transformations, **kwargs):
"""Augment a batch of labels during training.
Args:
labels: 2D label to the layer. Forwarded from `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 2D tensor, which will be forward to `layer.call()`.
"""
raise NotImplementedError()
def augment_targets(self, targets, transformations, **kwargs):
"""Augment a batch of targets during training.
Args:
targets: 2D label to the layer. Forwarded from `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 2D tensor, which will be forward to `layer.call()`.
"""
return self.augment_labels(targets, transformations, **kwargs)
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
"""Augment bounding boxes for one image during training.
Args:
bounding_boxes: 3D bounding boxes to the layer. Forwarded from
`call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 3D tensor, which will be forward to `layer.call()`.
"""
raise NotImplementedError()
def augment_keypoints(self, keypoints, transformations, **kwargs):
"""Augment a batch of keypoints for one image during training.
Args:
keypoints: 3D keypoints input tensor to the layer. Forwarded from
`layer.call()`. Shape should be [batch, num_keypoints, 2] in the
specified keypoint format.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 3D tensor, which will be forward to `layer.call()`.
"""
raise NotImplementedError()
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
"""Augment a batch of images' segmentation masks during training.
Args:
segmentation_masks: 4D segmentation mask input tensor to the layer.
This should generally have the shape [B, H, W, 1], or in some cases
[B, H, W, C] for multilabeled data. Forwarded from `layer.call()`.
transformations: The transformations object produced by
`get_random_transformations`. Used to coordinate the randomness
between image, label, bounding box, keypoints, and segmentation
mask.
Returns:
output 4D tensor containing the augmented segmentation mask, which
will be forward to `layer.call()`.
"""
raise NotImplementedError()
def get_random_transformation_batch(
self,
batch_size,
images=None,
labels=None,
bounding_boxes=None,
keypoints=None,
segmentation_masks=None,
):
"""Produce random transformations config for a batch of inputs.
        This is used to produce the same randomness between
image/label/bounding_box.
Args:
batch_size: the batch size of transformations configuration to sample.
images: 3D image tensor from inputs.
labels: optional 1D label tensor from inputs.
bounding_boxes: optional 2D bounding boxes tensor from inputs.
segmentation_masks: optional 3D segmentation mask tensor from inputs.
Returns:
Any type of object, which will be forwarded to `augment_images`,
`augment_labels` and `augment_bounding_boxes` as the `transformations`
parameter.
"""
        # Required to work with map_fn in the ragged case.
return tf.zeros((batch_size))
def _unwrap_ragged_image_call(self, inputs):
images = inputs.get(IMAGES, None)
labels = inputs.get(LABELS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
keypoints = inputs.get(KEYPOINTS, None)
segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
transformation = inputs.get("transformations")
images = images.to_tensor()
images = self.augment_ragged_image(
image=images,
label=labels,
bounding_boxes=bounding_boxes,
keypoints=keypoints,
segmentation_mask=segmentation_masks,
transformation=transformation,
)
return tf.RaggedTensor.from_tensor(images)
def _batch_augment(self, inputs):
images = inputs.get(IMAGES, None)
raw_images = images
labels = inputs.get(LABELS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
keypoints = inputs.get(KEYPOINTS, None)
segmentation_masks = inputs.get(SEGMENTATION_MASKS, None)
batch_size = tf.shape(images)[0]
transformations = self.get_random_transformation_batch(
batch_size,
images=images,
labels=labels,
bounding_boxes=bounding_boxes,
keypoints=keypoints,
segmentation_masks=segmentation_masks,
)
if isinstance(images, tf.RaggedTensor):
inputs_for_raggeds = {"transformations": transformations, **inputs}
images = tf.map_fn(
self._unwrap_ragged_image_call,
inputs_for_raggeds,
fn_output_signature=self.compute_ragged_image_signature(images),
)
else:
images = self.augment_images(
images,
transformations=transformations,
bounding_boxes=bounding_boxes,
labels=labels,
)
if (
isinstance(images, tf.RaggedTensor)
and self.force_output_dense_images
):
images = images.to_tensor()
result = {IMAGES: images}
if labels is not None:
labels = self.augment_targets(
labels,
transformations=transformations,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
result[LABELS] = labels
if bounding_boxes is not None:
bounding_boxes = self.augment_bounding_boxes(
bounding_boxes,
transformations=transformations,
labels=labels,
images=images,
raw_images=raw_images,
)
bounding_boxes = bounding_box.to_ragged(bounding_boxes)
result[BOUNDING_BOXES] = bounding_boxes
if keypoints is not None:
keypoints = self.augment_keypoints(
keypoints,
transformations=transformations,
labels=labels,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
result[KEYPOINTS] = keypoints
if segmentation_masks is not None:
segmentation_masks = self.augment_segmentation_masks(
segmentation_masks,
transformations=transformations,
labels=labels,
bounding_boxes=bounding_boxes,
images=images,
raw_images=raw_images,
)
if (
isinstance(segmentation_masks, tf.RaggedTensor)
and self.force_output_dense_segmentation_masks
):
segmentation_masks = segmentation_masks.to_tensor()
result[SEGMENTATION_MASKS] = segmentation_masks
# preserve any additional inputs unmodified by this layer.
for key in inputs.keys() - result.keys():
result[key] = inputs[key]
return result
def call(self, inputs):
# try to convert a given backend native tensor to TensorFlow tensor
# before passing it over to TFDataScope
is_tf_backend = config.backend() == "tensorflow"
is_in_tf_graph = not tf.executing_eagerly()
contains_ragged = lambda y: any(
tree.map_structure(
lambda x: isinstance(x, (tf.RaggedTensor, tf.SparseTensor)),
tree.flatten(y),
)
)
inputs_contain_ragged = contains_ragged(inputs)
if not is_tf_backend and not inputs_contain_ragged:
inputs = tree.map_structure(
lambda x: tf.convert_to_tensor(x), inputs
)
with scope.TFDataScope():
inputs = self._ensure_inputs_are_compute_dtype(inputs)
inputs, metadata = self._format_inputs(inputs)
images = inputs[IMAGES]
if images.shape.rank == 3 or images.shape.rank == 4:
outputs = self._format_output(
self._batch_augment(inputs), metadata
)
else:
raise ValueError(
"Image augmentation layers are expecting inputs to be "
"rank 3 (HWC) or 4D (NHWC) tensors. Got shape: "
f"{images.shape}"
)
# convert the outputs to backend native tensors if none of them
# contain RaggedTensors. Note that if the user passed in Raggeds
# but the outputs are dense, we still don't want to convert to
# backend native tensors. This is to avoid breaking TF data
# pipelines that can't easily be ported to become backend
# agnostic.
if not is_tf_backend and not is_in_tf_graph:
if not inputs_contain_ragged and not contains_ragged(outputs):
outputs = tree.map_structure(
# some layers return None, handle that case when
# converting to tensors
lambda x: ops.convert_to_tensor(x) if x is not None else x,
outputs,
)
return outputs
def _format_inputs(self, inputs):
metadata = {IS_DICT: True, USE_TARGETS: False}
if tf.is_tensor(inputs):
# single image input tensor
metadata[IS_DICT] = False
inputs = {IMAGES: inputs}
else:
# Copy the input dict before we mutate it.
inputs = dict(inputs)
metadata[BATCHED] = inputs["images"].shape.rank == 4
if inputs["images"].shape.rank == 3:
for key in list(inputs.keys()):
if key == BOUNDING_BOXES:
inputs[BOUNDING_BOXES]["boxes"] = tf.expand_dims(
inputs[BOUNDING_BOXES]["boxes"], axis=0
)
inputs[BOUNDING_BOXES]["classes"] = tf.expand_dims(
inputs[BOUNDING_BOXES]["classes"], axis=0
)
else:
inputs[key] = tf.expand_dims(inputs[key], axis=0)
if not isinstance(inputs, dict):
raise ValueError(
"Expect the inputs to be image tensor or dict. Got "
f"inputs={inputs}"
)
if BOUNDING_BOXES in inputs:
inputs[BOUNDING_BOXES] = self._format_bounding_boxes(
inputs[BOUNDING_BOXES]
)
if isinstance(inputs, dict) and TARGETS in inputs:
# TODO(scottzhu): Check if it only contains the valid keys
inputs[LABELS] = inputs[TARGETS]
del inputs[TARGETS]
metadata[USE_TARGETS] = True
return inputs, metadata
return inputs, metadata
def _format_output(self, output, metadata):
if not metadata[BATCHED]:
for key in list(output.keys()):
if key == BOUNDING_BOXES:
output[BOUNDING_BOXES]["boxes"] = tf.squeeze(
output[BOUNDING_BOXES]["boxes"], axis=0
)
output[BOUNDING_BOXES]["classes"] = tf.squeeze(
output[BOUNDING_BOXES]["classes"], axis=0
)
else:
output[key] = tf.squeeze(output[key], axis=0)
if not metadata[IS_DICT]:
return output[IMAGES]
elif metadata[USE_TARGETS]:
output[TARGETS] = output[LABELS]
del output[LABELS]
return output
def _ensure_inputs_are_compute_dtype(self, inputs):
if not isinstance(inputs, dict):
return preprocessing.ensure_tensor(
inputs,
self.compute_dtype,
)
# Copy the input dict before we mutate it.
inputs = dict(inputs)
inputs[IMAGES] = preprocessing.ensure_tensor(
inputs[IMAGES],
self.compute_dtype,
)
if LABELS in inputs:
inputs[LABELS] = preprocessing.ensure_tensor(
inputs[LABELS],
self.compute_dtype,
)
if KEYPOINTS in inputs:
inputs[KEYPOINTS] = preprocessing.ensure_tensor(
inputs[KEYPOINTS],
self.compute_dtype,
)
if SEGMENTATION_MASKS in inputs:
inputs[SEGMENTATION_MASKS] = preprocessing.ensure_tensor(
inputs[SEGMENTATION_MASKS],
self.compute_dtype,
)
if BOUNDING_BOXES in inputs:
inputs[BOUNDING_BOXES]["boxes"] = preprocessing.ensure_tensor(
inputs[BOUNDING_BOXES]["boxes"],
self.compute_dtype,
)
inputs[BOUNDING_BOXES]["classes"] = preprocessing.ensure_tensor(
inputs[BOUNDING_BOXES]["classes"],
self.compute_dtype,
)
return inputs
def _format_bounding_boxes(self, bounding_boxes):
# We can't catch the case where this is None, sometimes RaggedTensor
# drops this dimension.
if "classes" not in bounding_boxes:
            raise ValueError(
                "Bounding boxes are missing class_id. If you would like to pad "
                "the bounding boxes with class_id, use: "
                "`bounding_boxes['classes'] = "
                "tf.ones_like(bounding_boxes['boxes'][..., 0])`."
            )
return bounding_boxes
| keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/vectorized_base_image_augmentation_layer.py",
"repo_id": "keras-cv",
"token_count": 9889
} | 14 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_dropping_points import ( # noqa: E501
GlobalRandomDroppingPoints,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalDropPointsTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_specific_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(1, 50, 2)).astype("float32")
point_clouds = np.concatenate([point_clouds, point_clouds], axis=0)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
# The augmented point clouds in the first frame should be the same as
# the augmented point clouds in the second frame.
self.assertAllClose(outputs[POINT_CLOUDS][0], outputs[POINT_CLOUDS][1])
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.0)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_drop_all_point_clouds(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=1.0)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS])
def test_exclude_all_points(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=1.0, exclude_classes=1)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_exclude_the_first_half_points(self):
add_layer = GlobalRandomDroppingPoints(
drop_rate=1.0, exclude_classes=[1, 2]
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
class_1 = np.ones(shape=(2, 10, 1)).astype("float32")
class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, 25:, :] * 0.0,
outputs[POINT_CLOUDS][:, 25:, :],
)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_dropping_points_test.py",
"repo_id": "keras-cv",
"token_count": 2057
} | 15 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.swap_background import (
SwapBackground,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
ADDITIONAL_POINT_CLOUDS = base_augmentation_layer_3d.ADDITIONAL_POINT_CLOUDS
ADDITIONAL_BOUNDING_BOXES = base_augmentation_layer_3d.ADDITIONAL_BOUNDING_BOXES
class SwapBackgroundTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = SwapBackground()
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
additional_point_clouds = np.array(
[
[
[0, 2, 1, 3, 4],
[0, 0, 2, 0, 2],
[0, 11, 2, 3, 4],
[100, 101, 2, 3, 4],
[10, 10, 10, 10, 10],
]
]
* 2
).astype("float32")
additional_bounding_boxes = np.array(
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
outputs = add_layer(inputs)
# The following points in additional_point_clouds.
# [0, 2, 1, 3, 4], -> kept because it is in additional_point_clouds
# [0, 0, 1, 4, 4, 4, 0, 1].
# [0, 0, 2, 0, 2] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [0, 11, 2, 3, 4] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [100, 101, 2, 3, 4] -> kept because it is in additional_point_clouds
# [100, 100, 2, 5, 5, 5, 0, 1].
# [10, 10, 10, 10, 10] -> removed because it is a background point (not
# in any bounding_boxes and additional_point_clouds).
# The following points in point_clouds.
# [0, 1, 2, 3, 4] -> removed because it is in bounding_boxes
# [0, 0, 0, 4, 4, 4, 0, 1].
# [10, 1, 2, 3, 4] -> kept because it is a background point (not in any
# bounding_boxes and additional_point_clouds).
# [0, -1, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [0, 0, 1, 4, 4, 4, 0, 1].
# [100, 100, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [100, 100, 2, 5, 5, 5, 0, 1].
# [20, 20, 21, 1, 0] -> kept because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
augmented_point_clouds = np.array(
[
[
[0, 2, 1, 3, 4],
[100, 101, 2, 3, 4],
[10, 1, 2, 3, 4],
[20, 20, 21, 1, 0],
[0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
).astype("float32")
self.assertAllClose(
inputs[ADDITIONAL_POINT_CLOUDS], outputs[ADDITIONAL_POINT_CLOUDS]
)
self.assertAllClose(
inputs[ADDITIONAL_BOUNDING_BOXES],
outputs[ADDITIONAL_BOUNDING_BOXES],
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = SwapBackground()
# point_clouds: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of points, num of point features].
# The first 5 features are [x, y, z, class, range].
point_clouds = np.array(
[
[
[
[0, 1, 2, 3, 4],
[10, 1, 2, 3, 4],
[0, -1, 2, 3, 4],
[100, 100, 2, 3, 4],
[20, 20, 21, 1, 0],
]
]
* 2
]
* 3
).astype("float32")
# bounding_boxes: 3D (multi frames) float32 Tensor with shape
# [num of frames, num of boxes, num of box features].
# The first 8 features are [x, y, z, dx, dy, dz, phi, box class].
bounding_boxes = np.array(
[
[
[
[0, 0, 0, 4, 4, 4, 0, 1],
[20, 20, 20, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
additional_point_clouds = np.array(
[
[
[
[0, 2, 1, 3, 4],
[0, 0, 2, 0, 2],
[0, 11, 2, 3, 4],
[100, 101, 2, 3, 4],
[10, 10, 10, 10, 10],
]
]
* 2
]
* 3
).astype("float32")
additional_bounding_boxes = np.array(
[
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
ADDITIONAL_POINT_CLOUDS: additional_point_clouds,
ADDITIONAL_BOUNDING_BOXES: additional_bounding_boxes,
}
outputs = add_layer(inputs)
# The following points in additional_point_clouds.
# [0, 2, 1, 3, 4], -> kept because it is in additional_point_clouds
# [0, 0, 1, 4, 4, 4, 0, 1].
# [0, 0, 2, 0, 2] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [0, 11, 2, 3, 4] -> removed because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
# [100, 101, 2, 3, 4] -> kept because it is in additional_point_clouds
# [100, 100, 2, 5, 5, 5, 0, 1].
# [10, 10, 10, 10, 10] -> removed because it is a background point (not
# in any bounding_boxes and additional_point_clouds).
# The following points in point_clouds.
        # [0, 1, 2, 3, 4] -> removed because it is in bounding_boxes
# [0, 0, 0, 4, 4, 4, 0, 1].
# [10, 1, 2, 3, 4] -> kept because it is a background point (not in any
# bounding_boxes and additional_point_clouds).
# [0, -1, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [0, 0, 1, 4, 4, 4, 0, 1].
# [100, 100, 2, 3, 4] -> removed because it overlaps with
# additional_bounding_boxes [100, 100, 2, 5, 5, 5, 0, 1].
# [20, 20, 21, 1, 0] -> kept because it is a background point (not in
# any bounding_boxes and additional_point_clouds).
augmented_point_clouds = np.array(
[
[
[
[0, 2, 1, 3, 4],
[100, 101, 2, 3, 4],
[10, 1, 2, 3, 4],
[20, 20, 21, 1, 0],
[0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
augmented_bounding_boxes = np.array(
[
[
[
[0, 0, 1, 4, 4, 4, 0, 1],
[100, 100, 2, 5, 5, 5, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
]
* 2
]
* 3
).astype("float32")
self.assertAllClose(
inputs[ADDITIONAL_POINT_CLOUDS], outputs[ADDITIONAL_POINT_CLOUDS]
)
self.assertAllClose(
inputs[ADDITIONAL_BOUNDING_BOXES],
outputs[ADDITIONAL_BOUNDING_BOXES],
)
self.assertAllClose(outputs[POINT_CLOUDS], augmented_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], augmented_bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/swap_background_test.py",
"repo_id": "keras-cv",
"token_count": 6396
} | 16 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
class MLP(keras.layers.Layer):
    """An MLP block with architecture
`input_dim -> [hidden_dim] * (num_layers - 1) -> output_dim`.
Args:
hidden_dim (int): The number of units in the hidden layers.
output_dim (int): The number of units in the output layer.
num_layers (int): The total number of dense layers to use.
activation (str): Activation to use in the hidden layers.
Default is `"relu"`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
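    Example (a minimal sketch; the sizes below are illustrative):
    ```python
    from keras_cv.backend import ops
    mlp = MLP(hidden_dim=256, output_dim=128, num_layers=3)
    outputs = mlp(ops.ones((1, 64)))  # shape: (1, 128)
    ```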
""" # noqa: E501
def __init__(
self, hidden_dim, output_dim, num_layers, activation="relu", **kwargs
):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.num_layers = num_layers
self.activation = activation
h = [hidden_dim] * (num_layers - 1)
self.dense_net = []
for hidden_dim in h:
self.dense_net.append(keras.layers.Dense(hidden_dim))
self.dense_net.append(keras.layers.Activation(activation))
self.dense_net.append(keras.layers.Dense(output_dim))
self.dense_net = keras.models.Sequential(self.dense_net)
def build(self, input_shape):
self.dense_net.build(input_shape)
self.built = True
def call(self, x):
return self.dense_net(x)
def get_config(self):
config = super().get_config()
config.update(
{
"hidden_dim": self.hidden_dim,
"output_dim": self.output_dim,
"num_layers": self.num_layers,
"activation": self.activation,
}
)
return config
@keras_cv_export(
"keras_cv.layers.AddRelativePositionalEmbedding", package="keras_cv.layers"
)
class AddRelativePositionalEmbedding(keras.layers.Layer):
def __init__(self, input_size, key_dim, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.key_dim = key_dim
self.rel_pos_h = self.add_weight(
name="rel_pos_h",
shape=(2 * self.input_size[0] - 1, self.key_dim),
initializer="zeros",
trainable=True,
)
self.rel_pos_w = self.add_weight(
name="rel_pos_w",
shape=(2 * self.input_size[1] - 1, self.key_dim),
initializer="zeros",
trainable=True,
)
self.built = True
def _get_rel_pos(self, query_size, key_size, rel_pos):
"""
Get relative positional embeddings according to the relative positions
of query and key sizes.
Args:
            query_size (int): Spatial sequence size of the queries.
            key_size (int): Spatial sequence size of the keys.
rel_pos (tensor): Relative positional embedding tensor.
Returns:
tensor: Extracted positional embeddings according to relative
positions.
"""
max_rel_dist = 2 * max(query_size, key_size) - 1
if ops.shape(rel_pos)[0] != max_rel_dist:
rel_pos_resized = ops.image.resize(
image=ops.reshape(
rel_pos,
(1, ops.shape(rel_pos)[0], ops.shape(rel_pos)[1], 1),
),
size=(max_rel_dist, ops.shape(rel_pos)[1]),
interpolation="bilinear",
)
rel_pos_resized = ops.squeeze(rel_pos_resized, axis=(0, -1))
return rel_pos_resized
else:
rel_pos_resized = rel_pos
query_coordinates = ops.cast(
ops.arange(query_size), dtype=self.compute_dtype
)[:, None] * (max(key_size / query_size, 1.0))
key_coordinates = ops.cast(
ops.arange(key_size), dtype=self.compute_dtype
)[None, :] * (max(query_size / key_size, 1.0))
relative_coordinates = (query_coordinates - key_coordinates) + (
key_size - 1
) * max(query_size / key_size, 1.0)
relative_coordinates = ops.cast(relative_coordinates, dtype="int32")
return ops.take(rel_pos_resized, relative_coordinates, 0)
def call(self, attention_map, queries, query_size, key_size):
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
Args:
attention_map (tensor): Attention map.
queries (tensor): Queries in the attention layer with shape
`(B, q_h * q_w, C)`.
query_size (tuple[int, int]): Spatial sequence size of queries with
`(q_h, q_w)`.
key_size (tuple[int, int]): Spatial sequence size of keys with
`(k_h, k_w)`.
Returns:
tensor: attention map with added relative positional embeddings.
References:
- https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa: E501
"""
query_height, query_width = query_size[0], query_size[1]
key_height, key_width = key_size[0], key_size[1]
rel_heights = self._get_rel_pos(
query_height, key_height, self.rel_pos_h
)
rel_widths = self._get_rel_pos(query_width, key_width, self.rel_pos_w)
shape = ops.shape(queries)
B, C = shape[0], shape[2]
rel_queries = ops.reshape(queries, (B, query_height, query_width, C))
rel_heights = ops.einsum("bhwc,hkc->bhwk", rel_queries, rel_heights)
rel_widths = ops.einsum("bhwc,wkc->bhwk", rel_queries, rel_widths)
attention_map = ops.reshape(
attention_map, (B, query_height, query_width, key_height, key_width)
)
attention_map = attention_map + rel_heights[..., :, None]
attention_map = attention_map + rel_widths[..., None, :]
attention_map = ops.reshape(
attention_map,
(B, query_height * query_width, key_height * key_width),
)
return attention_map
def get_config(self):
config = super().get_config()
config.update({"input_size": self.input_size, "key_dim": self.key_dim})
return config
@keras_cv_export(
"keras_cv.layers.MultiHeadAttentionWithRelativePE",
package="keras_cv.layers",
)
class MultiHeadAttentionWithRelativePE(keras.layers.Layer):
"""Multi-head Attention block with relative position embeddings.
Args:
num_heads (int): Number of attention heads.
key_dim (int): Size of each attention head for query, key, and
value.
use_bias (bool, optional): Whether to use bias when projecting
the queries, keys, and values. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
embeddings or not. Defaults to `False`.
input_size (tuple[int, int], optional): Size of the input image.
Must be provided when using relative positional embeddings.
Defaults to `None`.
Raises:
ValueError: When `input_size = None` with `use_rel_pos = True`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
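    Example (a minimal sketch; the shapes below are illustrative, and the
    channel count must equal `num_heads * key_dim`):
    ```python
    from keras_cv.backend import ops
    attention = keras_cv.layers.MultiHeadAttentionWithRelativePE(
        num_heads=8, key_dim=64, use_rel_pos=True, input_size=(14, 14)
    )
    outputs = attention(ops.ones((1, 14, 14, 512)))  # shape: (1, 14, 14, 512)
    ```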
""" # noqa: E501
def __init__(
self,
num_heads,
key_dim,
use_bias=True,
use_rel_pos=False,
input_size=None,
**kwargs
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.key_dim = key_dim
self.scale = self.key_dim**-0.5
self.use_bias = use_bias
self.input_size = input_size
self.use_rel_pos = use_rel_pos
self.qkv = keras.layers.Dense(
key_dim * self.num_heads * 3, use_bias=self.use_bias
)
self.projection = keras.layers.Dense(key_dim * self.num_heads)
if self.use_rel_pos:
if input_size is None:
raise ValueError(
"Input size must be provided if using relative "
"positional encoding."
)
self.add_decomposed_reative_pe = AddRelativePositionalEmbedding(
self.input_size, self.key_dim
)
def build(self, input_shape=None):
self.qkv.build([self.key_dim * self.num_heads])
self.projection.build([self.key_dim * self.num_heads])
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
shape = ops.shape(x)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
qkv = ops.transpose(
ops.reshape(
self.qkv(x), (B, H * W, 3, self.num_heads, self.key_dim)
),
axes=(2, 0, 3, 1, 4),
)
qkv = ops.reshape(qkv, (3, B * self.num_heads, H * W, self.key_dim))
queries, keys, values = ops.unstack(qkv, axis=0)
attention_map = (queries * self.scale) @ ops.transpose(
keys, axes=(0, 2, 1)
)
if self.use_rel_pos:
attention_map = self.add_decomposed_reative_pe(
attention_map,
queries=queries,
query_size=(H, W),
key_size=(H, W),
)
attention_map = ops.softmax(attention_map, axis=-1)
x = ops.reshape(
attention_map @ values, (B, self.num_heads, H, W, self.key_dim)
)
x = ops.transpose(x, axes=(0, 2, 3, 1, 4))
x = ops.reshape(x, (B, H, W, C))
x = self.projection(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"key_dim": self.key_dim,
"use_bias": self.use_bias,
"use_rel_pos": self.use_rel_pos,
"input_size": self.input_size,
}
)
return config
@keras_cv_export(
"keras_cv.layers.WindowPartitioning", package="keras_cv.layers"
)
class WindowPartitioning(keras.layers.Layer):
def __init__(self, window_size, **kwargs):
super().__init__(**kwargs)
self.window_size = window_size
self.built = True
def partition(self, x):
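        # Pad the feature map so its height and width become multiples of
        # `window_size`, then split it into non-overlapping
        # (window_size, window_size) windows stacked along the batch axis.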
shape = ops.shape(x)
B, H, W, C = shape[0], shape[1], shape[2], shape[3]
pad_height = (
self.window_size - H % self.window_size
) % self.window_size
pad_width = (self.window_size - W % self.window_size) % self.window_size
if pad_height > 0 or pad_width > 0:
x = ops.pad(x, ((0, 0), (0, pad_height), (0, pad_width), (0, 0)))
H_padded, W_padded = H + pad_height, W + pad_width
x = ops.reshape(
x,
(
B,
H_padded // self.window_size,
self.window_size,
W_padded // self.window_size,
self.window_size,
C,
),
)
windows = ops.reshape(
ops.transpose(x, axes=(0, 1, 3, 2, 4, 5)),
(-1, self.window_size, self.window_size, C),
)
return windows, (H_padded, W_padded)
def unpartition(self, windows, HW_padded, HW):
H_padded, W_padded = HW_padded
H, W = HW
B = ops.shape(windows)[0] // (
(H_padded // self.window_size) * (W_padded // self.window_size)
)
x = ops.reshape(
windows,
(
B,
H_padded // self.window_size,
W_padded // self.window_size,
self.window_size,
self.window_size,
-1,
),
)
x = ops.reshape(
ops.transpose(x, axes=(0, 1, 3, 2, 4, 5)),
(B, H_padded, W_padded, -1),
)
return x[:, :H, :W, :]
def get_config(self):
config = super().get_config()
config.update({"window_size": self.window_size})
return config
@keras_cv_export(
"keras_cv.layers.WindowedTransformerEncoder", package="keras_cv.layers"
)
class WindowedTransformerEncoder(keras.layers.Layer):
"""Transformer blocks with support of window attention and residual
propagation blocks.
Args:
project_dim (int): the dimensionality of the projection of the
encoder, and output of the `MultiHeadAttention`.
mlp_dim (int): the intermediate dimensionality of the MLP head before
projecting to `project_dim`.
num_heads (int): the number of heads for the `MultiHeadAttention`
layer.
use_bias (bool, optional): Whether to use bias to project the keys,
queries, and values in the attention layer. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
emcodings in the attention layer. Defaults to `False`.
window_size (int, optional): Window size for windowed attention.
Defaults to `0`.
input_size (tuple[int, int], optional): Height and width of the input
image as a tuple of integers. Must be provided when using relative
positional embeddings. Defaults to `None`.
activation (str, optional): the activation function to apply in the
            MLP head. Defaults to `"gelu"`.
layer_norm_epsilon (float, optional): The epsilon to use in the layer
normalization layers. Defaults to `1e-6`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
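    Example (a minimal sketch; the shapes below are illustrative):
    ```python
    from keras_cv.backend import ops
    block = keras_cv.layers.WindowedTransformerEncoder(
        project_dim=256, mlp_dim=1024, num_heads=8, window_size=7
    )
    # Attention runs over non-overlapping 7x7 windows of the 28x28 map.
    outputs = block(ops.ones((1, 28, 28, 256)))  # shape: (1, 28, 28, 256)
    ```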
""" # noqa: E501
def __init__(
self,
project_dim,
mlp_dim,
num_heads,
use_bias=True,
use_rel_pos=False,
window_size=0,
input_size=None,
activation="gelu",
layer_norm_epsilon=1e-6,
**kwargs
):
super().__init__(**kwargs)
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.use_bias = use_bias
self.input_size = input_size
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.window_size = window_size
self.use_rel_pos = use_rel_pos
self.layer_norm1 = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.layer_norm2 = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon
)
self.attention = MultiHeadAttentionWithRelativePE(
num_heads=self.num_heads,
key_dim=self.project_dim // self.num_heads,
use_bias=use_bias,
use_rel_pos=use_rel_pos,
input_size=(
input_size if window_size == 0 else (window_size, window_size)
),
)
self.mlp_block = MLP(
mlp_dim,
project_dim,
num_layers=2,
            activation=activation,
)
self.window_partitioning = WindowPartitioning(window_size)
def build(self, input_shape=None):
self.layer_norm1.build([None, None, None, self.project_dim])
self.layer_norm2.build([None, None, None, self.project_dim])
self.attention.build()
self.mlp_block.build([None, None, None, self.project_dim])
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
shortcut = x
x = self.layer_norm1(x)
# Window Partition
if self.window_size > 0:
H, W = ops.shape(x)[1], ops.shape(x)[2]
x, HW_padded = self.window_partitioning.partition(x)
x = self.attention(x)
# Reverse Window Partition
if self.window_size > 0:
x = self.window_partitioning.unpartition(
x, HW_padded=HW_padded, HW=(H, W)
)
x = shortcut + x
x = x + self.mlp_block(self.layer_norm2(x))
return x
def get_config(self):
config = super().get_config()
config.update(
{
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"use_bias": self.use_bias,
"use_rel_pos": self.use_rel_pos,
"window_size": self.window_size,
"input_size": self.input_size,
"activation": self.activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@keras_cv_export(
"keras_cv.layers.ViTDetPatchingAndEmbedding", package="keras_cv.layers"
)
class ViTDetPatchingAndEmbedding(keras.layers.Layer):
"""Image to Patch Embedding using only a conv layer (without
layer normalization).
Args:
kernel_size (tuple[int, int], optional): Kernel size of the
projection layer. Defaults to `(16, 16)`.
strides (tuple, optional): Strides of the projection layer.
Defaults to `(16, 16)`.
embed_dim (int, optional): Number of filters to use in the
projection layer i.e. projection size. Defaults to `768`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
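    Example (a minimal sketch; the shapes below are illustrative):
    ```python
    from keras_cv.backend import ops
    patch_embed = keras_cv.layers.ViTDetPatchingAndEmbedding(embed_dim=768)
    # A 224x224 image becomes a 14x14 grid of 16x16 patch embeddings.
    patches = patch_embed(ops.ones((1, 224, 224, 3)))  # shape: (1, 14, 14, 768)
    ```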
""" # noqa: E501
def __init__(
self, kernel_size=(16, 16), strides=(16, 16), embed_dim=768, **kwargs
):
super().__init__(**kwargs)
self.projection = keras.layers.Conv2D(
embed_dim, kernel_size=kernel_size, strides=strides
)
self.kernel_size = kernel_size
self.strides = strides
self.embed_dim = embed_dim
def build(self, input_shape):
self.projection.build(input_shape)
self.built = True
def compute_output_shape(self, input_shape):
return self.projection.compute_output_shape(input_shape)
def call(self, x):
x = self.projection(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"kernel_size": self.kernel_size,
"strides": self.strides,
"embed_dim": self.embed_dim,
}
)
return config
# TODO: Merge this with the `keras_cv.layers.PatchingAndEmbedding` class once
# it has been ported to Keras Core.
@keras_cv_export(
"keras_cv.layers.AddPositionalEmbedding", package="keras_cv.layers"
)
class AddPositionalEmbedding(keras.layers.Layer):
def __init__(self, img_size, patch_size, embed_dim, **kwargs):
super().__init__(**kwargs)
self.img_size = img_size
self.patch_size = patch_size
self.embed_dim = embed_dim
self.pos_embed = self.add_weight(
name="pos_embed",
shape=(
1,
img_size // patch_size,
img_size // patch_size,
embed_dim,
),
initializer="zeros",
trainable=True,
)
def compute_output_shape(self, input_shape):
return input_shape
def call(self, x):
return x + self.pos_embed
def get_config(self):
config = super().get_config()
config.update(
{
"img_size": self.img_size,
"patch_size": self.patch_size,
"embed_dim": self.embed_dim,
}
)
return config
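# Usage sketch (illustrative, not part of the library API): patchify an image,
# add learned positional embeddings, and run one windowed transformer encoder
# block. Shapes follow the defaults documented above; the all-ones input is a
# stand-in for real images.
if __name__ == "__main__":
    import numpy as np

    images = np.ones((1, 1024, 1024, 3), dtype="float32")
    x = ViTDetPatchingAndEmbedding(
        kernel_size=(16, 16), strides=(16, 16), embed_dim=768
    )(images)  # -> (1, 64, 64, 768)
    x = AddPositionalEmbedding(img_size=1024, patch_size=16, embed_dim=768)(x)
    x = WindowedTransformerEncoder(
        project_dim=768,
        mlp_dim=768 * 4,
        num_heads=12,
        use_rel_pos=True,
        window_size=14,
        input_size=(64, 64),
    )(x)  # shape is preserved: (1, 64, 64, 768)
    print(x.shape)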
| keras-cv/keras_cv/layers/vit_det_layers.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_det_layers.py",
"repo_id": "keras-cv",
"token_count": 9997
} | 17 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.losses.BinaryPenaltyReducedFocalCrossEntropy")
class BinaryPenaltyReducedFocalCrossEntropy(keras.losses.Loss):
"""Implements CenterNet modified Focal loss.
Compared with `keras.losses.BinaryFocalCrossentropy`, this loss discounts
negative labels whose value is below `positive_threshold`: the closer a
negative label is to 1, the more its contribution to the final loss is
discounted. Users can either divide by the number of keypoints outside of
this loss computation, or do so here by passing `sample_weight` as
1.0/num_key_points.
Args:
alpha: a focusing parameter used to compute the focal factor.
Defaults to 2.0. Note, this is equivalent to the `gamma` parameter in
`keras.losses.BinaryFocalCrossentropy`.
beta: a float parameter, penalty exponent for negative labels, defaults to
4.0.
from_logits: Whether `y_pred` is expected to be a logits tensor, defaults
to `False`.
positive_threshold: Anything bigger than this is treated as positive
label, defaults to 0.99.
positive_weight: single scalar weight on positive examples, defaults to
1.0.
negative_weight: single scalar weight on negative examples, defaults to
1.0.
Inputs:
y_true: [batch_size, ...] float tensor
y_pred: [batch_size, ...] float tensor with same shape as y_true.
References:
- [Objects as Points](https://arxiv.org/pdf/1904.07850.pdf) Eq 1.
- [Cornernet: Detecting objects as paired keypoints](https://arxiv.org/abs/1808.01244) for `alpha` and
`beta`.
""" # noqa: E501
def __init__(
self,
alpha=2.0,
beta=4.0,
from_logits=False,
positive_threshold=0.99,
positive_weight=1.0,
negative_weight=1.0,
reduction="sum_over_batch_size",
name="binary_penalty_reduced_focal_cross_entropy",
):
super().__init__(reduction=reduction, name=name)
self.alpha = alpha
self.beta = beta
self.from_logits = from_logits
self.positive_threshold = positive_threshold
self.positive_weight = positive_weight
self.negative_weight = negative_weight
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if self.from_logits:
y_pred = ops.sigmoid(y_pred)
# TODO(tanzhenyu): Evaluate whether we need clipping after model is
# trained.
y_pred = ops.clip(y_pred, 1e-4, 0.9999)
y_true = ops.clip(y_true, 0.0, 1.0)
pos_loss = ops.power(1.0 - y_pred, self.alpha) * ops.log(y_pred)
neg_loss = (
ops.power(1.0 - y_true, self.beta)
* ops.power(y_pred, self.alpha)
* ops.log(1.0 - y_pred)
)
positive_mask = y_true > self.positive_threshold
loss = ops.where(
positive_mask,
self.positive_weight * pos_loss,
self.negative_weight * neg_loss,
)
return -1.0 * loss
def get_config(self):
config = super().get_config()
config.update(
{
"alpha": self.alpha,
"beta": self.beta,
"from_logits": self.from_logits,
"positive_threshold": self.positive_threshold,
"positive_weight": self.positive_weight,
"negative_weight": self.negative_weight,
}
)
return config
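# Minimal usage sketch (illustrative values only): compare a heatmap-style
# target against sigmoid scores. Only values above `positive_threshold`
# (default 0.99) in `y_true` are treated as positives; the 0.8 entry is a
# penalty-reduced negative.
if __name__ == "__main__":
    y_true = ops.convert_to_tensor([[1.0, 0.8, 0.0]])
    y_pred = ops.convert_to_tensor([[0.9, 0.2, 0.1]])
    loss_fn = BinaryPenaltyReducedFocalCrossEntropy()
    print(float(loss_fn(y_true, y_pred)))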
| keras-cv/keras_cv/losses/penalty_reduced_focal_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/penalty_reduced_focal_loss.py",
"repo_id": "keras-cv",
"token_count": 1767
} | 18 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for Backbone models."""
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.utils.preset_utils import check_preset_class
from keras_cv.utils.preset_utils import load_from_preset
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.python_utils import format_docstring
@keras_cv_export("keras_cv.models.Backbone")
class Backbone(keras.Model):
"""Base class for Backbone models.
Backbones are the feature-extracting layers of models pretrained on a
standard task, such as ImageNet classification, and can be reused in other
tasks.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._pyramid_level_inputs = {}
self._functional_layer_ids = set(
id(layer) for layer in self._flatten_layers()
)
def __dir__(self):
# Temporary fixes for weight saving. This mimics the following PR for
# older version of Keras: https://github.com/keras-team/keras/pull/18982
def filter_fn(attr):
try:
return id(getattr(self, attr)) not in self._functional_layer_ids
except:
return True
return filter(filter_fn, super().__dir__())
def get_config(self):
# Don't chain to super here. The default `get_config()` for functional
# models is nested and cannot be passed to our Backbone constructors.
return {
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
# The default `from_config()` for functional models will return a
# vanilla `keras.Model`. We override it to get a subclass instance back.
return cls(**config)
@classproperty
def presets(cls):
"""Dictionary of preset names and configs."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configs that include weights."""
return {}
@classproperty
def presets_without_weights(cls):
"""Dictionary of preset names and configs that don't include weights."""
return {
preset: cls.presets[preset]
for preset in set(cls.presets) - set(cls.presets_with_weights)
}
@classmethod
def from_preset(
cls,
preset,
load_weights=None,
**kwargs,
):
"""Instantiate {{model_name}} model from preset config and weights.
Args:
preset: string. Must be one of "{{preset_names}}".
If looking for a preset with pretrained weights, choose one of
"{{preset_with_weights_names}}".
load_weights: Whether to load pre-trained weights into model.
Defaults to `None`, which follows whether the preset has
pretrained weights available.
Examples:
```python
# Load architecture and weights from preset
model = keras_cv.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
)
# Load randomly initialized model from preset architecture
model = keras_cv.models.{{model_name}}.from_preset(
    "{{example_preset_name}}",
    load_weights=False,
)
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
check_preset_class(preset, cls)
return load_from_preset(
preset,
load_weights=load_weights,
config_overrides=kwargs,
)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to set up a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
if not cls.presets:
cls.from_preset.__func__.__doc__ = """Not implemented.
No presets available for this class.
"""
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Backbone.from_preset.__doc__
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets_with_weights), ""),
preset_names='", "'.join(cls.presets),
preset_with_weights_names='", "'.join(cls.presets_with_weights),
)(cls.from_preset.__func__)
@property
def pyramid_level_inputs(self):
"""Intermediate model outputs for feature extraction.
The format is a dictionary mapping a string key to a layer name.
The string key represents the level of the feature output. A typical
feature pyramid has five levels corresponding to scales "P3", "P4",
"P5", "P6", "P7" in the backbone. Scale Pn represents a feature map 2^n
times smaller in width and height than the input image.
Example:
```python
{
'P3': 'v2_stack_1_block4_out',
'P4': 'v2_stack_2_block6_out',
'P5': 'v2_stack_3_block3_out',
}
```
"""
return self._pyramid_level_inputs
@pyramid_level_inputs.setter
def pyramid_level_inputs(self, value):
self._pyramid_level_inputs = value
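# Usage sketch: concrete subclasses (e.g. `keras_cv.models.ResNet18V2Backbone`,
# which is used elsewhere in this repository) inherit `from_preset` and expose
# `pyramid_level_inputs` for feature extraction. The import is deferred inside
# the guard to avoid a circular import at module load time.
if __name__ == "__main__":
    from keras_cv.models import ResNet18V2Backbone

    backbone = ResNet18V2Backbone()
    # A dict mapping level names (e.g. "P3") to layer names in the backbone.
    print(backbone.pyramid_level_inputs)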
| keras-cv/keras_cv/models/backbones/backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/backbone.py",
"repo_id": "keras-cv",
"token_count": 2673
} | 19 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models.backbones.efficientnet_lite.efficientnet_lite_backbone import ( # noqa: E501
EfficientNetLiteBackbone,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Usage:
```python
input_data = np.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
class EfficientNetLiteB0Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b0", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB1Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b1", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB2Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b2", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB3Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b3", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetLiteB4Backbone(EfficientNetLiteBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetLiteBackbone.from_preset(
"efficientnetlite_b4", **kwargs
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
EfficientNetLiteB0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB0"),
)
setattr(
EfficientNetLiteB1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB1"),
)
setattr(
EfficientNetLiteB2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB2"),
)
setattr(
EfficientNetLiteB3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB3"),
)
setattr(
EfficientNetLiteB4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetLiteB4"),
)
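# Usage sketch, mirroring the alias docstring above: each alias is a thin
# wrapper around `EfficientNetLiteBackbone.from_preset` for a fixed
# architecture.
if __name__ == "__main__":
    import numpy as np

    input_data = np.ones(shape=(8, 224, 224, 3))
    model = EfficientNetLiteB0Backbone()
    output = model(input_data)
    print(output.shape)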
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_aliases.py",
"repo_id": "keras-cv",
"token_count": 2848
} | 20 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_aliases import (
EfficientNetV2SBackbone,
)
from keras_cv.models.backbones.efficientnet_v2.efficientnet_v2_backbone import (
EfficientNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class EfficientNetV2BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_alias_model_with_rescaling(self):
model = EfficientNetV2SBackbone(include_rescaling=True)
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, EfficientNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = EfficientNetV2SBackbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "efficientnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases are serialized as the base class.
self.assertIsInstance(restored_model, EfficientNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = EfficientNetV2SBackbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P1", "P2", "P3", "P4", "P5"]
self.assertEqual(list(outputs.keys()), levels)
self.assertEqual(
outputs["P1"].shape,
(None, input_size // 2**1, input_size // 2**1, 24),
)
self.assertEqual(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 48),
)
self.assertEqual(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 64),
)
self.assertEqual(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 160),
)
self.assertEqual(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 1280),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = EfficientNetV2Backbone(
stackwise_kernel_sizes=[3, 3, 3, 3, 3, 3],
stackwise_num_repeats=[2, 4, 4, 6, 9, 15],
stackwise_input_filters=[24, 24, 48, 64, 128, 160],
stackwise_output_filters=[24, 48, 64, 128, 160, 256],
stackwise_expansion_ratios=[1, 4, 4, 4, 6, 6],
stackwise_squeeze_and_excite_ratios=[0.0, 0.0, 0, 0.25, 0.25, 0.25],
stackwise_strides=[1, 2, 2, 2, 1, 2],
stackwise_conv_types=[
"fused",
"fused",
"fused",
"unfused",
"unfused",
"unfused",
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=True,
input_shape=(None, None, num_channels),
)
self.assertEqual(model.output_shape, (None, None, None, 1280))
| keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 3821
} | 21 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNetV1 model preset configurations."""
backbone_presets_no_weights = {
"resnet18": {
"metadata": {
"description": (
"ResNet model with 18 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 11186112,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet18/2",
},
"resnet34": {
"metadata": {
"description": (
"ResNet model with 34 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 21301696,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet34/2",
},
"resnet50": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 23561152,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet50/2",
},
"resnet101": {
"metadata": {
"description": (
"ResNet model with 101 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 42605504,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet101/2",
},
"resnet152": {
"metadata": {
"description": (
"ResNet model with 152 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style)."
),
"params": 58295232,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet152/2",
},
}
backbone_presets_with_weights = {
"resnet50_imagenet": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation are applied after the convolution layers "
"(v1 style). "
"Trained on Imagenet 2012 classification task."
),
"params": 23561152,
"official_name": "ResNetV1",
"path": "resnet_v1",
},
"kaggle_handle": "kaggle://keras/resnetv1/keras/resnet50_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
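# Usage sketch (assuming the standard KerasCV preset API): these configs are
# consumed via `from_preset` on the ResNet v1 backbone class. Loading the
# "resnet50_imagenet" preset downloads pretrained weights from Kaggle.
if __name__ == "__main__":
    from keras_cv.models import ResNetBackbone

    backbone = ResNetBackbone.from_preset("resnet50_imagenet")
    print(backbone.count_params())  # ~23.6M, matching the metadata above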
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1743
} | 22 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.vit_det_layers import AddPositionalEmbedding
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets,
)
from keras_cv.models.backbones.vit_det.vit_det_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.ViTDetBackbone", package="keras_cv.models")
class ViTDetBackbone(Backbone):
"""A ViT image encoder that uses a windowed transformer encoder and
relative positional encodings.
Args:
input_shape (tuple[int], optional): The size of the input image in
`(H, W, C)` format. Defaults to `(1024, 1024, 3)`.
input_tensor (KerasTensor, optional): Output of
`keras.layers.Input()` to use as image input for the model.
Defaults to `None`.
include_rescaling (bool, optional): Whether to rescale the inputs. If
set to `True`, inputs will be passed through a
`Rescaling(1/255.0)` layer. Defaults to `False`.
patch_size (int, optional): the patch size to be supplied to the
Patching layer to turn input images into a flattened sequence of
patches. Defaults to `16`.
embed_dim (int, optional): The latent dimensionality to be projected
into in the output of each stacked windowed transformer encoder.
Defaults to `768`.
depth (int, optional): The number of transformer encoder layers to
stack in the Vision Transformer. Defaults to `12`.
mlp_dim (int, optional): The dimensionality of the hidden Dense
layer in the transformer MLP head. Defaults to `768*4`.
num_heads (int, optional): the number of heads to use in the
`MultiHeadAttentionWithRelativePE` layer of each transformer
encoder. Defaults to `12`.
out_chans (int, optional): The number of channels (features) in the
output (image encodings). Defaults to `256`.
use_bias (bool, optional): Whether to use bias to project the keys,
queries, and values in the attention layer. Defaults to `True`.
use_abs_pos (bool, optional): Whether to add absolute positional
embeddings to the output patches. Defaults to `True`.
use_rel_pos (bool, optional): Whether to use relative positional
encodings in the attention layer. Defaults to `True`.
window_size (int, optional): The size of the window for windowed
attention in the transformer encoder blocks. Defaults to `14`.
global_attention_indices (list, optional): Indexes for blocks using
global attention. Defaults to `[2, 5, 8, 11]`.
layer_norm_epsilon (int, optional): The epsilon to use in the layer
normalization blocks in transformer encoder. Defaults to `1e-6`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
- [Detectron2](https://github.com/facebookresearch/detectron2)
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
input_shape=(1024, 1024, 3),
input_tensor=None,
patch_size=16,
embed_dim=768,
depth=12,
mlp_dim=768 * 4,
num_heads=12,
out_chans=256,
use_bias=True,
use_abs_pos=True,
use_rel_pos=True,
window_size=14,
global_attention_indices=[2, 5, 8, 11],
layer_norm_epsilon=1e-6,
**kwargs
):
img_input = utils.parse_model_inputs(
input_shape, input_tensor, name="images"
)
# Check that the input image is well specified.
if img_input.shape[-3] is None or img_input.shape[-2] is None:
raise ValueError(
"Height and width of the image must be specified"
" in `input_shape`."
)
if img_input.shape[-3] != img_input.shape[-2]:
raise ValueError(
"Input image must be square i.e. the height must"
" be equal to the width in the `input_shape`"
" tuple/tensor."
)
img_size = img_input.shape[-3]
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
# VITDet scales inputs based on the standard ImageNet mean/stddev.
x = (x - ops.array([0.485, 0.456, 0.406], dtype=x.dtype)) / (
ops.array([0.229, 0.224, 0.225], dtype=x.dtype)
)
x = ViTDetPatchingAndEmbedding(
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
embed_dim=embed_dim,
)(x)
if use_abs_pos:
x = AddPositionalEmbedding(img_size, patch_size, embed_dim)(x)
for i in range(depth):
x = WindowedTransformerEncoder(
project_dim=embed_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
use_bias=use_bias,
use_rel_pos=use_rel_pos,
window_size=(
window_size if i not in global_attention_indices else 0
),
input_size=(img_size // patch_size, img_size // patch_size),
)(x)
x = keras.models.Sequential(
[
keras.layers.Conv2D(
filters=out_chans, kernel_size=1, use_bias=False
),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Conv2D(
filters=out_chans,
kernel_size=3,
padding="same",
use_bias=False,
),
keras.layers.LayerNormalization(epsilon=1e-6),
]
)(x)
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.patch_size = patch_size
self.embed_dim = embed_dim
self.depth = depth
self.mlp_dim = mlp_dim
self.num_heads = num_heads
self.out_chans = out_chans
self.use_bias = use_bias
self.use_rel_pos = use_rel_pos
self.use_abs_pos = use_abs_pos
self.window_size = window_size
self.global_attention_indices = global_attention_indices
self.layer_norm_epsilon = layer_norm_epsilon
self.input_tensor = input_tensor
self.include_rescaling = include_rescaling
@property
def pyramid_level_inputs(self):
raise NotImplementedError(
"The `ViTDetBackbone` model doesn't compute"
" pyramid level features."
)
def get_config(self):
config = super().get_config()
config.update(
{
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"include_rescaling": self.include_rescaling,
"patch_size": self.patch_size,
"embed_dim": self.embed_dim,
"depth": self.depth,
"mlp_dim": self.mlp_dim,
"num_heads": self.num_heads,
"out_chans": self.out_chans,
"use_bias": self.use_bias,
"use_abs_pos": self.use_abs_pos,
"use_rel_pos": self.use_rel_pos,
"window_size": self.window_size,
"global_attention_indices": self.global_attention_indices,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
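# Usage sketch: build the backbone on its default square input and encode a
# batch of images. The output is a dense feature map with `out_chans` channels
# at 1/16 of the input resolution. The all-ones batch is illustrative only.
if __name__ == "__main__":
    import numpy as np

    images = np.ones((1, 1024, 1024, 3), dtype="float32")
    backbone = ViTDetBackbone(include_rescaling=True)
    features = backbone(images)  # -> (1, 64, 64, 256)
    print(features.shape)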
| keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/vit_det/vit_det_backbone.py",
"repo_id": "keras-cv",
"token_count": 4128
} | 23 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_encoder import CLIPEncoder
class CLIPTextEncoder(keras.Model):
def __init__(
self,
transformer_width,
transformer_layers,
transformer_heads,
vocab_size,
embed_dim,
context_length,
**kwargs,
):
super().__init__(
**kwargs,
)
self.transformer_width = transformer_width
self.transformer_layers = transformer_layers
self.transformer_heads = transformer_heads
self.vocab_size = vocab_size
self.embed_dim = embed_dim
self.context_length = context_length
self.token_embedding = keras.layers.Embedding(
vocab_size,
transformer_width,
name="token_embedding",
)
self.positional_embedding = keras.layers.Embedding(
self.context_length,
transformer_width,
name="positional_embedding",
)
self.encoder = CLIPEncoder(
width=transformer_width,
num_layers=transformer_layers,
heads=transformer_heads,
name="clip_encoder",
)
self.ln_final = keras.layers.LayerNormalization(name="ln_final")
self.text_projector = keras.layers.Dense(
embed_dim, name="text_projector", use_bias=False
)
def build(self, input_shape):
super().build(input_shape)
self.token_embedding.build(input_shape)
self.positional_embedding.build([1, self.context_length])
self.encoder.build(None)
self.ln_final.build([None, None, self.transformer_width])
self.text_projector.build([None, None, self.transformer_width])
def call(self, inputs, attention_mask=None):
token_embedding = self.token_embedding(inputs)
position_ids = ops.expand_dims(
ops.arange(self.context_length, dtype="int32"), 0
)
position_embedding = self.positional_embedding(position_ids)
position_embedding = ops.tile(
position_embedding, repeats=(inputs.shape[0], 1, 1)
)
causal_attention_mask = ops.ones(
(self.context_length, self.context_length)
)
# Zero out everything below the diagonal (keep the upper triangle)
causal_attention_mask = ops.triu(causal_attention_mask)
causal_attention_mask = ops.cast(causal_attention_mask, "float32")
attention_mask = ops.cast(attention_mask, dtype="float32")
expanded_mask = ops.tile(
attention_mask[:, None, None, :], (1, 1, self.context_length, 1)
)
expanded_mask = (1.0 - expanded_mask) * (-1e8)
encoded_output = self.encoder(
token_embedding + position_embedding,
causal_attention_mask=causal_attention_mask,
attention_mask=expanded_mask,
)
layer_norm = self.ln_final(encoded_output)
indices = ops.expand_dims(
ops.cast(ops.argmax(inputs, axis=-1), "int32"), axis=-1
)
selected_features = ops.take_along_axis(
layer_norm, indices[:, :, None], axis=1
)
text_features = self.text_projector(selected_features)
output = ops.squeeze(text_features, axis=1)
return output
def get_config(self):
config = super().get_config()
config.update(
{
"transformer_width": self.transformer_width,
"transformer_layers": self.transformer_layers,
"transformer_heads": self.transformer_heads,
"vocab_size": self.vocab_size,
"embed_dim": self.embed_dim,
"context_length": self.context_length,
}
)
return config
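# Usage sketch (small illustrative hyper-parameters, not a pretrained
# configuration): encode a batch of already-tokenized prompts padded to
# `context_length`, with an all-ones attention mask.
if __name__ == "__main__":
    encoder = CLIPTextEncoder(
        transformer_width=64,
        transformer_layers=2,
        transformer_heads=2,
        vocab_size=100,
        embed_dim=32,
        context_length=8,
    )
    tokens = ops.ones((2, 8), dtype="int32")
    mask = ops.ones((2, 8), dtype="int32")
    embeddings = encoder(tokens, attention_mask=mask)  # -> (2, 32)
    print(embeddings.shape)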
| keras-cv/keras_cv/models/feature_extractor/clip/clip_text_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_text_model.py",
"repo_id": "keras-cv",
"token_count": 1964
} | 24 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
from tensorflow.keras import optimizers
from keras_cv.models import ResNet18V2Backbone
from keras_cv.models.legacy.object_detection.faster_rcnn.faster_rcnn import (
FasterRCNN,
)
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.tests.test_case import TestCase
class FasterRCNNTest(TestCase):
# TODO(ianstenbit): Make FasterRCNN support shapes that are not multiples
# of 128, perhaps by adding a flag to the anchor generator for whether to
# include anchors centered outside of the image. (RetinaNet does use those,
# while FasterRCNN doesn't). For more context on why this is the case, see
# https://github.com/keras-team/keras-cv/pull/1882
@parameterized.parameters(
((2, 640, 384, 3),),
((2, 512, 512, 3),),
((2, 128, 128, 3),),
)
def test_faster_rcnn_infer(self, batch_shape):
model = FasterRCNN(
num_classes=80,
bounding_box_format="xyxy",
backbone=ResNet18V2Backbone(),
)
images = tf.random.normal(batch_shape)
outputs = model(images, training=False)
# 1000 proposals in inference
self.assertAllEqual([2, 1000, 81], outputs[1].shape)
self.assertAllEqual([2, 1000, 4], outputs[0].shape)
@parameterized.parameters(
((2, 640, 384, 3),),
((2, 512, 512, 3),),
((2, 128, 128, 3),),
)
def test_faster_rcnn_train(self, batch_shape):
model = FasterRCNN(
num_classes=80,
bounding_box_format="xyxy",
backbone=ResNet18V2Backbone(),
)
images = tf.random.normal(batch_shape)
outputs = model(images, training=True)
self.assertAllEqual([2, 1000, 81], outputs[1].shape)
self.assertAllEqual([2, 1000, 4], outputs[0].shape)
def test_invalid_compile(self):
model = FasterRCNN(
num_classes=80,
bounding_box_format="yxyx",
backbone=ResNet18V2Backbone(),
)
with self.assertRaisesRegex(ValueError, "only accepts"):
model.compile(rpn_box_loss="binary_crossentropy")
with self.assertRaisesRegex(ValueError, "only accepts"):
model.compile(
rpn_classification_loss=keras.losses.BinaryCrossentropy(
from_logits=False
)
)
@pytest.mark.large # Fit is slow, so mark these large.
def test_faster_rcnn_with_dictionary_input_format(self):
faster_rcnn = FasterRCNN(
num_classes=20,
bounding_box_format="xywh",
backbone=ResNet18V2Backbone(),
)
images, boxes = _create_bounding_box_dataset("xywh")
dataset = tf.data.Dataset.from_tensor_slices(
{"images": images, "bounding_boxes": boxes}
).batch(5, drop_remainder=True)
faster_rcnn.compile(
optimizer=optimizers.Adam(),
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
faster_rcnn.fit(dataset, epochs=1)
faster_rcnn.evaluate(dataset)
| keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/object_detection/faster_rcnn/faster_rcnn_test.py",
"repo_id": "keras-cv",
"token_count": 1681
} | 25 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
try:
from keras.src.utils import tf_utils
except ImportError:
from keras.utils import tf_utils
def _minimum_control_deps(outputs):
"""Returns the minimum control dependencies to ensure step succeeded."""
if tf.executing_eagerly():
return [] # Control dependencies not needed.
outputs = tf.nest.flatten(outputs, expand_composites=True)
for out in outputs:
# Variables can't be control dependencies.
if not isinstance(out, tf.Variable):
return [out] # Return first Tensor or Op from outputs.
return [] # No viable Tensor or Op to use for control deps.
def make_predict_function(model, force=False):
if model.predict_function is not None and not force:
return model.predict_function
def step_function(iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.predict_step(data)
# Ensure the counter is updated only if `predict_step` succeeds.
with tf.control_dependencies(_minimum_control_deps(outputs)):
model._predict_counter.assign_add(1)
return outputs
if model._jit_compile:
run_step = tf.function(
run_step, jit_compile=True, reduce_retracing=True
)
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = model.distribute_strategy.gather(outputs, axis=0)
# Note that this is the only deviation from the base keras.Model
# implementation. We run `decode_predictions` inside the computation
# graph but outside of the distribute_strategy (i.e. on the host CPU).
if not isinstance(data, tf.Tensor):
data = tf.concat(data.values, axis=0)
return model.decode_predictions(outputs, data)
# Special case if steps_per_execution is one.
if (
model._steps_per_execution is None
or model._steps_per_execution.numpy().item() == 1
):
def predict_function(iterator):
"""Runs an evaluation execution with a single step."""
return step_function(iterator)
else:
def predict_function(iterator):
"""Runs an evaluation execution with multiple steps."""
outputs = step_function(iterator)
for _ in tf.range(model._steps_per_execution - 1):
tf.autograph.experimental.set_loop_options(
shape_invariants=[
(
outputs,
tf.nest.map_structure(
lambda t: tf_utils.get_tensor_spec(
t, dynamic_batch=True
).shape,
outputs,
),
)
]
)
step_outputs = step_function(iterator)
outputs = tf.nest.map_structure(
lambda t1, t2: tf.concat([t1, t2]), outputs, step_outputs
)
return outputs
if not model.run_eagerly:
predict_function = tf.function(predict_function, reduce_retracing=True)
model.predict_function = predict_function
return predict_function
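# Usage sketch: detection models in this repository route Keras' `predict`
# machinery through this helper by overriding `make_predict_function`, so that
# `decode_predictions` runs on the host after the distributed step. The
# `MyDetector` class below is purely illustrative, not part of this module:
#
#     class MyDetector(keras.Model):
#         def decode_predictions(self, predictions, images):
#             ...  # e.g. run non-max suppression on the raw predictions
#
#         def make_predict_function(self, force=False):
#             return make_predict_function(self, force=force)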
| keras-cv/keras_cv/models/object_detection/predict_utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/predict_utils.py",
"repo_id": "keras-cv",
"token_count": 1702
} | 26 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
BATCH_NORM_EPSILON = 1e-3
BATCH_NORM_MOMENTUM = 0.97
# TODO(ianstenbit): Remove this method once we're using CSPDarkNet backbone
# (Calls to it should be inlined in the detector head)
def apply_conv_bn(
inputs,
output_channel,
kernel_size=1,
strides=1,
activation="swish",
name="conv_bn",
):
if kernel_size > 1:
inputs = keras.layers.ZeroPadding2D(
padding=kernel_size // 2, name=f"{name}_pad"
)(inputs)
x = keras.layers.Conv2D(
filters=output_channel,
kernel_size=kernel_size,
strides=strides,
padding="valid",
use_bias=False,
name=f"{name}_conv",
)(inputs)
x = keras.layers.BatchNormalization(
momentum=BATCH_NORM_MOMENTUM,
epsilon=BATCH_NORM_EPSILON,
name=f"{name}_bn",
)(x)
x = keras.layers.Activation(activation, name=name)(x)
return x
# TODO(ianstenbit): Remove this method once we're using CSPDarkNet backbone
# Calls to it should instead call the CSP block from the DarkNet implementation.
def apply_csp_block(
inputs,
channels=-1,
depth=2,
shortcut=True,
expansion=0.5,
activation="swish",
name="csp_block",
):
channel_axis = -1
channels = channels if channels > 0 else inputs.shape[channel_axis]
hidden_channels = int(channels * expansion)
pre = apply_conv_bn(
inputs,
hidden_channels * 2,
kernel_size=1,
activation=activation,
name=f"{name}_pre",
)
short, deep = ops.split(pre, 2, axis=channel_axis)
out = [short, deep]
for id in range(depth):
deep = apply_conv_bn(
deep,
hidden_channels,
kernel_size=3,
activation=activation,
name=f"{name}_pre_{id}_1",
)
deep = apply_conv_bn(
deep,
hidden_channels,
kernel_size=3,
activation=activation,
name=f"{name}_pre_{id}_2",
)
deep = (out[-1] + deep) if shortcut else deep
out.append(deep)
out = ops.concatenate(out, axis=channel_axis)
out = apply_conv_bn(
out,
channels,
kernel_size=1,
activation=activation,
name=f"{name}_output",
)
return out
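# Usage sketch: both helpers are functional-style blocks that map a feature
# map tensor to a new feature map. The all-ones tensor below is illustrative.
if __name__ == "__main__":
    features = ops.ones((1, 32, 32, 16))
    x = apply_conv_bn(features, 32, kernel_size=3, name="example_conv_bn")
    x = apply_csp_block(x, channels=32, depth=1, name="example_csp_block")
    print(x.shape)  # -> (1, 32, 32, 32)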
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_layers.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_layers.py",
"repo_id": "keras-cv",
"token_count": 1278
} | 27 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv.backend import keras
from keras_cv.backend.config import keras_3
from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassDetectionHead,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiClassHeatmapDecoder,
)
from keras_cv.models.object_detection_3d.center_pillar import (
MultiHeadCenterPillar,
)
from keras_cv.models.object_detection_3d.center_pillar_backbone import (
CenterPillarBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.skipif(
keras_3() and keras.backend.backend() == "torch",
reason="CenterPillar does not yet support PyTorch.",
)
class CenterPillarTest(TestCase):
def test_center_pillar_call(self):
voxel_net = DynamicVoxelization(
voxel_size=[0.1, 0.1, 1000],
spatial_size=[-20, 20, -20, 20, -20, 20],
)
# dimensions computed from voxel_net
backbone = CenterPillarBackbone(
stackwise_down_blocks=[1, 1],
stackwise_down_filters=[64, 128],
stackwise_up_filters=[128, 64],
input_shape=(None, None, 128),
)
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=[2, 2],
anchor_size=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
max_pool_size=[3, 3],
max_num_box=[3, 4],
heatmap_threshold=[0.2, 0.2],
voxel_size=voxel_net._voxel_size,
spatial_size=voxel_net._spatial_size,
)
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=[2, 2],
)
model = MultiHeadCenterPillar(
backbone=backbone,
voxel_net=voxel_net,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
point_xyz = tf.random.normal([2, 1000, 3])
point_feature = tf.random.normal([2, 1000, 4])
point_mask = tf.constant(True, shape=[2, 1000, 1])
outputs = model(
{
"point_xyz": point_xyz,
"point_feature": point_feature,
"point_mask": point_mask,
},
training=True,
)
self.assertEqual(outputs["class_1"].shape, (2, 400, 400, 12))
self.assertEqual(outputs["class_2"].shape, (2, 400, 400, 12))
def test_center_pillar_predict(self):
voxel_net = DynamicVoxelization(
voxel_size=[0.1, 0.1, 1000],
spatial_size=[-20, 20, -20, 20, -20, 20],
)
backbone = CenterPillarBackbone(
stackwise_down_blocks=[1, 1],
stackwise_down_filters=[64, 128],
stackwise_up_filters=[128, 64],
input_shape=(None, None, 128),
)
decoder = MultiClassHeatmapDecoder(
num_classes=2,
num_head_bin=[2, 2],
anchor_size=[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
max_pool_size=[3, 3],
max_num_box=[3, 4],
heatmap_threshold=[0.2, 0.2],
voxel_size=voxel_net._voxel_size,
spatial_size=voxel_net._spatial_size,
)
multiclass_head = MultiClassDetectionHead(
num_classes=2,
num_head_bin=[2, 2],
)
model = MultiHeadCenterPillar(
backbone=backbone,
voxel_net=voxel_net,
multiclass_head=multiclass_head,
prediction_decoder=decoder,
)
point_xyz = tf.random.normal([2, 1000, 3])
point_feature = tf.random.normal([2, 1000, 4])
point_mask = tf.constant(True, shape=[2, 1000, 1])
outputs = model.predict(
{
"point_xyz": point_xyz,
"point_feature": point_feature,
"point_mask": point_mask,
}
)
# max_num_box is [3, 4], so up to 3 + 4 = 7 boxes total across the two classes
self.assertEqual(outputs["3d_boxes"]["boxes"].shape, (2, 7, 7))
self.assertEqual(outputs["3d_boxes"]["classes"].shape, (2, 7))
self.assertEqual(outputs["3d_boxes"]["confidence"].shape, (2, 7))
self.assertAllEqual(
outputs["3d_boxes"]["classes"],
tf.constant([1, 1, 1, 2, 2, 2, 2] * 2, shape=(2, 7)),
)
| keras-cv/keras_cv/models/object_detection_3d/center_pillar_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection_3d/center_pillar_test.py",
"repo_id": "keras-cv",
"token_count": 2425
} | 28 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.stable_diffusion.attention_block import ( # noqa: E501
AttentionBlock,
)
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
from keras_cv.models.stable_diffusion.resnet_block import ResnetBlock
@keras_cv_export("keras_cv.models.stable_diffusion.ImageEncoder")
class ImageEncoder(keras.Sequential):
"""ImageEncoder is the VAE Encoder for StableDiffusion."""
def __init__(self, download_weights=True):
super().__init__(
[
keras.layers.Input((None, None, 3)),
PaddedConv2D(128, 3, padding=1),
ResnetBlock(128),
ResnetBlock(128),
PaddedConv2D(128, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(256),
ResnetBlock(256),
PaddedConv2D(256, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(512),
ResnetBlock(512),
PaddedConv2D(512, 3, padding=((0, 1), (0, 1)), strides=2),
ResnetBlock(512),
ResnetBlock(512),
ResnetBlock(512),
AttentionBlock(512),
ResnetBlock(512),
keras.layers.GroupNormalization(epsilon=1e-5),
keras.layers.Activation("swish"),
PaddedConv2D(8, 3, padding=1),
PaddedConv2D(8, 1),
# TODO(lukewood): can this be refactored to be a Rescaling
# layer? Perhaps some sort of rescale and gather?
# Either way, we may need a lambda to gather the first 4
# dimensions.
keras.layers.Lambda(lambda x: x[..., :4] * 0.18215),
]
)
if download_weights:
image_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/vae_encoder.h5", # noqa: E501
file_hash="c60fb220a40d090e0f86a6ab4c312d113e115c87c40ff75d11ffcf380aab7ebb", # noqa: E501
)
self.load_weights(image_encoder_weights_fpath)
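# Usage sketch (all-ones illustrative input; `download_weights=False` skips
# fetching the pretrained VAE weights): the encoder maps an RGB image to a
# 4-channel latent at 1/8 of the input resolution.
if __name__ == "__main__":
    import numpy as np

    encoder = ImageEncoder(download_weights=False)
    image = np.ones((1, 512, 512, 3), dtype="float32")
    latent = encoder(image)  # -> (1, 64, 64, 4)
    print(latent.shape)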
| keras-cv/keras_cv/models/stable_diffusion/image_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/image_encoder.py",
"repo_id": "keras-cv",
"token_count": 1309
} | 29 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import numpy as np
import pytest
import tensorflow as tf
import keras_cv
from keras_cv.tests.test_case import TestCase
num_points = 200000
num_boxes = 1000
box_dimension = 20.0
def get_points_boxes():
points = tf.random.uniform(
shape=[num_points, 2], minval=0, maxval=box_dimension, dtype=tf.float32
)
points_z = 5.0 * np.ones(shape=[num_points, 1], dtype="float32")
points = tf.concat([points, points_z], axis=-1)
boxes_x = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_y = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_dx = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dx = tf.math.minimum(box_dimension - boxes_x, boxes_dx)
boxes_dy = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dy = tf.math.minimum(box_dimension - boxes_y, boxes_dy)
boxes_z = 5.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_dz = 3.0 * np.ones([num_boxes, 1], dtype="float32")
boxes_angle = np.zeros([num_boxes, 1], dtype="float32")
boxes = tf.concat(
[boxes_x, boxes_y, boxes_z, boxes_dx, boxes_dy, boxes_dz, boxes_angle],
axis=-1,
)
return points, boxes
class WithinBox3DTest(TestCase):
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_unrotated(self):
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, 0],
[5, 5, 5, 1, 1, 1, 0],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([0, 0, -1, 0, -1, 1, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_unbatched_rotated(self):
# A box rotated by 45 degrees; its intersections with the x and y axes
# are at [2*sqrt(2), 0] and [0, 2*sqrt(2)].
boxes = np.array(
[
[0, 0, 0, 4, 4, 4, np.pi / 4],
]
).astype("float32")
points = np.array(
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose([0, 0, -1, 0, -1], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_unrotated(self):
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, 0]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2, 0, 0],
[2.01, 0, 0],
# this point belongs to 2nd box
[5.5, 5.5, 5.5],
# this point doesn't belong to 2nd box
[5.6, 5.5, 5.5],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual(
[[0, 0, -1, 0, -1, -1, -1], [-1, -1, -1, -1, -1, 0, -1]], res
)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_batched_rotated(self):
# A box rotated by 45 degrees; its intersections with the x and y axes
# are at [2*sqrt(2), 0] and [0, 2*sqrt(2)].
boxes = np.array(
[
[[0, 0, 0, 4, 4, 4, np.pi / 4]],
[[5, 5, 5, 1, 1, 1, 0]],
]
).astype("float32")
points = np.array(
[
[
[0, 0, 0],
[0, 0, 2],
# this point has z value larger than box top z
[0, 0, 2.1],
[2.82, 0, 0],
# this point has x value larger than rotated box
[2.83, 0, 0],
]
]
* 2
).astype("float32")
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllEqual([[0, 0, -1, 0, -1], [-1, -1, -1, -1, -1]], res)
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
def test_many_points(self):
points, boxes = get_points_boxes()
for _ in range(5):
res = keras_cv.point_cloud.within_box3d_index(points, boxes)
self.assertAllClose(res.shape, points.shape[:1])
@pytest.mark.skipif(
"TEST_CUSTOM_OPS" not in os.environ
or os.environ["TEST_CUSTOM_OPS"] != "true",
reason="Requires binaries compiled from source",
)
@pytest.mark.extra_large
def test_equal(self):
for _ in range(10000):
with tf.device("cpu:0"):
box_center = tf.random.uniform(
shape=[1, 3], minval=-10.0, maxval=10.0
)
box_dim = tf.random.uniform(
shape=[1, 3], minval=0.1, maxval=10.0
)
boxes = tf.concat([box_center, box_dim, [[0.0]]], axis=-1)
points = tf.random.normal([32, 3])
res = keras_cv.point_cloud.is_within_any_box3d(points, boxes)
res_v2 = keras_cv.point_cloud.is_within_any_box3d_v2(
points, boxes
)
res_v3 = keras_cv.point_cloud.is_within_any_box3d_v3(
points, boxes
)
self.assertAllEqual(res, res_v2)
self.assertAllEqual(res, res_v3)
| keras-cv/keras_cv/point_cloud/within_box_3d_test.py/0 | {
"file_path": "keras-cv/keras_cv/point_cloud/within_box_3d_test.py",
"repo_id": "keras-cv",
"token_count": 4191
} | 30 |
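The tests above exercise `keras_cv.point_cloud.within_box3d_index` and `is_within_any_box3d`. A minimal usage sketch, assuming the custom ops have been compiled from source (as the skip markers in the tests note); boxes are encoded as `[center_x, center_y, center_z, dx, dy, dz, heading]` and the values below are illustrative:

```python
import numpy as np

import keras_cv

# One axis-aligned box centered at the origin with edge length 4.
boxes = np.array([[0.0, 0.0, 0.0, 4.0, 4.0, 4.0, 0.0]], dtype="float32")
points = np.array([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0]], dtype="float32")

# Index of the containing box for each point; -1 means "outside every box".
indices = keras_cv.point_cloud.within_box3d_index(points, boxes)

# Boolean membership test against any box.
mask = keras_cv.point_cloud.is_within_any_box3d(points, boxes)
```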
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import fill_utils
class BoundingBoxToMaskTest(TestCase):
def _run_test(self, corners, expected):
mask = fill_utils.corners_to_mask(corners, mask_shape=(6, 6))
mask = tf.cast(mask, dtype=tf.int32)
tf.assert_equal(mask, expected)
def test_corners_whole(self):
expected = np.array(
[
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, 3]], dtype="float32")
self._run_test(corners, expected)
def test_corners_frac(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1.5, 0.5, 4.5, 3.5]], dtype="float32")
self._run_test(corners, expected)
def test_width_zero(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[0, 0, 0, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_zero(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, 0]], dtype="float32")
self._run_test(corners, expected)
def test_width_negative(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, -2, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_negative(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 0, 4, -2]], dtype="float32")
self._run_test(corners, expected)
def test_width_out_of_lower_bound(self):
expected = np.array(
[
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[-2, -2, 2, 3]], dtype="float32")
self._run_test(corners, expected)
def test_width_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[4, 0, 8, 3]], dtype="float32")
self._run_test(corners, expected)
def test_height_out_of_lower_bound(self):
expected = np.array(
[
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, -3, 4, 2]], dtype="float32")
self._run_test(corners, expected)
def test_height_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
],
dtype="int32",
)
corners = np.array([[1, 4, 4, 9]], dtype="float32")
self._run_test(corners, expected)
def test_start_out_of_upper_bound(self):
expected = np.array(
[
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype="int32",
)
corners = np.array([[8, 8, 10, 12]], dtype="float32")
self._run_test(corners, expected)
class FillRectangleTest(TestCase):
def _run_test(self, img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected):
batch_size = 1
batch_shape = (batch_size, img_h, img_w, 1)
images = np.ones(batch_shape, dtype="int32")
centers_x = tf.fill([batch_size], cent_x)
centers_y = tf.fill([batch_size], cent_y)
width = tf.fill([batch_size], rec_w)
height = tf.fill([batch_size], rec_h)
fill = tf.zeros_like(images)
filled_images = fill_utils.fill_rectangle(
images, centers_x, centers_y, width, height, fill
)
# remove batch dimension and channel dimension
filled_images = filled_images[0, ..., 0]
tf.assert_equal(filled_images, expected)
def test_rectangle_position(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 3
rec_w, rec_h = 5, 3
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_width_out_of_lower_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 1, 3
rec_w, rec_h = 5, 3
# assert width is truncated when cent_x - rec_w < 0
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_width_out_of_upper_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 6, 3
rec_w, rec_h = 5, 3
# assert width is truncated when cent_x + rec_w > img_w
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_height_out_of_lower_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 1
rec_w, rec_h = 3, 5
# assert height is truncated when cent_y - rec_h < 0
expected = np.array(
[
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_height_out_of_upper_bound(self):
img_w, img_h = 8, 8
cent_x, cent_y = 4, 6
rec_w, rec_h = 3, 5
# assert height is truncated when cent_y + rec_h > img_h
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
],
dtype="int32",
)
self._run_test(img_w, img_h, cent_x, cent_y, rec_w, rec_h, expected)
def test_different_fill(self):
batch_size = 2
img_w, img_h = 5, 5
cent_x, cent_y = 2, 2
rec_w, rec_h = 3, 3
batch_shape = (batch_size, img_h, img_w, 1)
images = np.ones(batch_shape, dtype="int32")
centers_x = tf.fill([batch_size], cent_x)
centers_y = tf.fill([batch_size], cent_y)
width = tf.fill([batch_size], rec_w)
height = tf.fill([batch_size], rec_h)
fill = tf.stack(
[tf.fill(images[0].shape, 2), tf.fill(images[1].shape, 3)]
)
filled_images = fill_utils.fill_rectangle(
images, centers_x, centers_y, width, height, fill
)
# remove channel dimension
filled_images = filled_images[..., 0]
expected = np.array(
[
[
[1, 1, 1, 1, 1],
[1, 2, 2, 2, 1],
[1, 2, 2, 2, 1],
[1, 2, 2, 2, 1],
[1, 1, 1, 1, 1],
],
[
[1, 1, 1, 1, 1],
[1, 3, 3, 3, 1],
[1, 3, 3, 3, 1],
[1, 3, 3, 3, 1],
[1, 1, 1, 1, 1],
],
],
dtype="int32",
)
tf.assert_equal(filled_images, expected)
| keras-cv/keras_cv/utils/fill_utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/utils/fill_utils_test.py",
"repo_id": "keras-cv",
"token_count": 7182
} | 31 |
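For reference, a short sketch of the two utilities these tests cover: `corners_to_mask` rasterizes `[x0, y0, x1, y1]` corners into a binary mask, and `fill_rectangle` paints axis-aligned rectangles (given per-image centers, widths, and heights) onto a batch of images. Shapes and values below are illustrative:

```python
import numpy as np
import tensorflow as tf

from keras_cv.utils import fill_utils

# Rasterize corner boxes ([x0, y0, x1, y1]) into a mask of the given shape.
corners = np.array([[1, 0, 4, 3]], dtype="float32")
mask = fill_utils.corners_to_mask(corners, mask_shape=(6, 6))

# Paint one rectangle per image onto a batch of two 8x8 single-channel images.
images = np.ones((2, 8, 8, 1), dtype="int32")
filled = fill_utils.fill_rectangle(
    images,
    tf.fill([2], 4),          # centers_x
    tf.fill([2], 3),          # centers_y
    tf.fill([2], 5),          # rectangle widths
    tf.fill([2], 3),          # rectangle heights
    tf.zeros_like(images),    # fill values
)
```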
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cv2
except ImportError:
cv2 = None
import numpy as np
from keras_cv import bounding_box
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_cv2_installed
@keras_cv_export("keras_cv.visualization.draw_bounding_boxes")
def draw_bounding_boxes(
images,
bounding_boxes,
color,
bounding_box_format,
line_thickness=1,
text_thickness=1,
font_scale=1.0,
class_mapping=None,
):
"""Internal utility to draw bounding boxes on the target image.
Accepts a batch of images and batch of bounding boxes. The function draws
the bounding boxes onto the image, and returns a new image tensor with the
annotated images. This API is intentionally not exported, and is considered
an implementation detail.
Args:
images: a batch Tensor of images to plot bounding boxes onto.
bounding_boxes: a Tensor of batched bounding boxes to plot onto the
provided images.
color: the color in which to plot the bounding boxes
bounding_box_format: The format of bounding boxes to plot onto the
images. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box formats.
        line_thickness: (Optional) line thickness for the box and text labels.
            Defaults to `1`.
        text_thickness: (Optional) thickness for the text. Defaults to `1`.
font_scale: (Optional) scale of font to draw in, defaults to `1.0`.
class_mapping: (Optional) dictionary from class ID to class label.
Returns:
the input `images` with provided bounding boxes plotted on top of them
""" # noqa: E501
assert_cv2_installed("draw_bounding_boxes")
bounding_boxes = bounding_box.convert_format(
bounding_boxes, source=bounding_box_format, target="xyxy", images=images
)
text_thickness = text_thickness or line_thickness
bounding_boxes["boxes"] = utils.to_numpy(bounding_boxes["boxes"])
bounding_boxes["classes"] = utils.to_numpy(bounding_boxes["classes"])
images = utils.to_numpy(images)
image_width = images.shape[-2]
outline_factor = image_width // 100
class_mapping = class_mapping or {}
result = []
if len(images.shape) != 4:
raise ValueError(
"Images must be a batched np-like with elements of shape "
"(height, width, 3)"
)
for i in range(images.shape[0]):
bounding_box_batch = {
"boxes": bounding_boxes["boxes"][i],
"classes": bounding_boxes["classes"][i],
}
if "confidence" in bounding_boxes:
bounding_box_batch["confidence"] = bounding_boxes["confidence"][i]
image = utils.to_numpy(images[i]).astype("uint8")
for b_id in range(bounding_box_batch["boxes"].shape[0]):
x, y, x2, y2 = bounding_box_batch["boxes"][b_id].astype(int)
class_id = bounding_box_batch["classes"][b_id].astype(int)
confidence = bounding_box_batch.get("confidence", None)
if class_id == -1:
continue
# force conversion back to contiguous array
x, y, x2, y2 = int(x), int(y), int(x2), int(y2)
cv2.rectangle(
image,
(x, y),
(x2, y2),
(0, 0, 0, 0.5),
line_thickness + outline_factor,
)
cv2.rectangle(image, (x, y), (x2, y2), color, line_thickness)
class_id = int(class_id)
if class_id in class_mapping:
label = class_mapping[class_id]
if confidence is not None:
label = f"{label} | {confidence[b_id]:.2f}"
x, y = _find_text_location(
x, y, font_scale, line_thickness, outline_factor
)
cv2.putText(
image,
label,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale,
(0, 0, 0, 0.5),
text_thickness + outline_factor,
)
cv2.putText(
image,
label,
(x, y),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale,
color,
text_thickness,
)
result.append(image)
return np.array(result).astype(int)
def _find_text_location(x, y, font_scale, line_thickness, outline_factor):
font_height = int(font_scale * 12)
target_y = y - int(8 + outline_factor)
if target_y - (2 * font_height) > 0:
return x, y - int(8 + outline_factor)
line_offset = line_thickness + outline_factor
static_offset = 3
return (
x + outline_factor + static_offset,
y + (2 * font_height) + line_offset + static_offset,
)
| keras-cv/keras_cv/visualization/draw_bounding_boxes.py/0 | {
"file_path": "keras-cv/keras_cv/visualization/draw_bounding_boxes.py",
"repo_id": "keras-cv",
"token_count": 2568
} | 32 |
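For quick reference, a usage sketch of the exported `keras_cv.visualization.draw_bounding_boxes` entry point. It assumes OpenCV is installed, and the image, box, and class-mapping values below are purely illustrative:

```python
import numpy as np

import keras_cv

images = np.zeros((1, 256, 256, 3), dtype="uint8")  # one blank 256x256 image
bounding_boxes = {
    "boxes": np.array([[[10.0, 20.0, 100.0, 150.0]]], dtype="float32"),
    "classes": np.array([[0]], dtype="float32"),
}

annotated = keras_cv.visualization.draw_bounding_boxes(
    images,
    bounding_boxes,
    color=(0, 255, 0),
    bounding_box_format="xyxy",
    class_mapping={0: "car"},  # hypothetical label map
)
```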
#!/bin/bash
isort .
black .
find . \( -iname '*.h' -o -iname '*.c' -o -iname '*.cpp' -o -iname '*.hpp' -o -iname '*.cc' \) \
    | xargs clang-format --style=google -i -fallback-style=none
| keras-cv/shell/format.sh/0 | {
"file_path": "keras-cv/shell/format.sh",
"repo_id": "keras-cv",
"token_count": 80
} | 33 |
# API Design Guidelines
In general, KerasCV abides by the
[API design guidelines of Keras](https://github.com/keras-team/governance/blob/master/keras_api_design_guidelines.md).
There are a few API guidelines that apply only to KerasCV. These are discussed
in this document.
# Label Names
When working with `bounding_box` and `segmentation_map` labels the
abbreviations `bbox` and `segm` are often used. In KerasCV, we will *not* be
using these abbreviations. This is done to ensure full consistency in our
naming convention. While the team is fond of the abbreviation `bbox`, we are
less fond of `segm`. In order to ensure full consistency, we have decided to
use the full names for label types in our code base.
# Preprocessing Layers
## Strength Parameters
Many augmentation layers take a parameter representing a strength, often called
`factor`. When possible, factor values must conform to the range: `[0, 1]`, with
1 representing the strongest transformation and 0 representing a no-op transform.
The strength of an augmentation should scale linearly with this factor. If needed,
a transformation can be performed to map to a large value range internally. If
this is done, please provide a thorough explanation of the value range semantics in
the docstring.
Additionally, factors should support both float and tuples as inputs. If a float is
passed, such as `factor=0.5`, the layer should default to the range `[0, factor]`.
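A sketch of how a layer might honor this contract, using the `parse_factor` helper from `keras_cv.utils.preprocessing` (the layer name here is hypothetical):

```python
from keras_cv.layers import BaseImageAugmentationLayer
from keras_cv.utils import preprocessing


class RandomWidget(BaseImageAugmentationLayer):  # hypothetical layer
    def __init__(self, factor=0.5, seed=None, **kwargs):
        super().__init__(seed=seed, **kwargs)
        # Accepts a float (interpreted as the range [0, factor]) or a
        # (lower, upper) tuple, and samples from that range per image.
        self.factor = preprocessing.parse_factor(
            factor, max_value=1.0, param_name="factor", seed=seed
        )
```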
## BaseImageAugmentationLayer
When implementing preprocessing, we encourage users to subclass the
`keras_cv.layers.preprocessing.BaseImageAugmentationLayer`. This layer provides
a common `call()` method, auto vectorization, and more.
When subclassing `BaseImageAugmentationLayer`, several methods can be overridden:
- `BaseImageAugmentationLayer.augment_image()` must be overridden
- `augment_label()` allows updates to be made to labels
- `augment_bounding_box()` allows updates to bounding boxes to be made
[`RandomShear` serves as a canonical example of how to subclass `BaseImageAugmentationLayer`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing/random_shear.py)
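A minimal, hypothetical subclass (not a layer that ships with KerasCV) showing the shape of these overrides:

```python
import tensorflow as tf

from keras_cv.layers import BaseImageAugmentationLayer


class RandomBlueTint(BaseImageAugmentationLayer):  # hypothetical example layer
    def get_random_transformation(self, **kwargs):
        # Sample the per-image randomness once so every target (image,
        # label, boxes) is transformed consistently.
        return tf.random.uniform((), minval=0.0, maxval=100.0)

    def augment_image(self, image, transformation=None, **kwargs):
        r, g, b = tf.unstack(tf.cast(image, tf.float32), axis=-1)
        b = tf.clip_by_value(b + transformation, 0.0, 255.0)
        return tf.stack([r, g, b], axis=-1)

    def augment_label(self, label, transformation=None, **kwargs):
        return label

    def augment_bounding_boxes(self, bounding_boxes, transformation=None, **kwargs):
        return bounding_boxes
```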
## Vectorization
`BaseImageAugmentationLayer` requires you to implement augmentations in an
image-wise basis instead of using a vectorized approach. This design choice
was based made on the results found in the
[vectorization\_strategy\_benchmark.py](../benchmarks/vectorization_strategy_benchmark.py)
benchmark.
In short, the benchmark shows that making use of `tf.vectorized_map()` performs
almost identically to a manually vectorized implementation. As such, we have
decided to rely on `tf.vectorized_map()` for performance.
![Results of vectorization strategy benchmark](images/runtime-plot.png)
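Concretely, an augmentation written for a single image can be mapped over a batch with `tf.vectorized_map`, which is roughly what the base layer does internally; a simplified sketch:

```python
import tensorflow as tf


def augment_single_image(image):
    # Any per-image transformation written without a batch dimension.
    return tf.image.flip_left_right(image)


images = tf.random.uniform((8, 64, 64, 3))
augmented = tf.vectorized_map(augment_single_image, images)
```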
## Color Based Preprocessing Layers
Some preprocessing layers in KerasCV perform color based transformations. This
includes `RandomBrightness`, `Equalize`, `Solarization`, and more.
Preprocessing layers that perform color based transformations make the
following assumptions:
- these layers must accept a `value_range`, which is a tuple of numbers.
- `value_range` must default to `(0, 255)`
- input images may be of any `dtype`
The decision to support inputs of any `dtype` is made based on the nuance that
some Keras layers cast user inputs without the user knowing. For example, if
`Solarization` expected user inputs to be of type `int`, and a custom layer
was accidentally casting inputs to `float32`, it would be a bad user experience
to raise an error asserting that all inputs must be of type `int`.
New preprocessing layers should be consistent with these decisions.
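In practice, such a layer maps inputs into the `[0, 255]` range, applies its color transformation, and maps back. A sketch using the `transform_value_range` utility from `keras_cv.utils.preprocessing` (the inversion shown is just a stand-in transformation):

```python
from keras_cv.utils import preprocessing


def color_op(image, value_range, compute_dtype="float32"):
    image = preprocessing.transform_value_range(
        image, original_range=value_range, target_range=(0, 255), dtype=compute_dtype
    )
    image = 255.0 - image  # the actual color transformation goes here
    return preprocessing.transform_value_range(
        image, original_range=(0, 255), target_range=value_range, dtype=compute_dtype
    )
```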
# Code Samples
- Import symbols from top-level namespaces in code samples (for example, in usage docstrings).
Prefer:
```python
keras_cv.layers.StochasticDepth
```
to:
```python
keras_cv.layers.regularization.stochastic_depth.StochasticDepth
```
| keras-cv/API_DESIGN.md/0 | {
"file_path": "keras-cv/API_DESIGN.md",
"repo_id": "keras-cv",
"token_count": 1029
} | 0 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from keras_cv.layers import Grayscale
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
class OldGrayscale(BaseImageAugmentationLayer):
"""Grayscale is a preprocessing layer that transforms RGB images to
Grayscale images.
Input images should have values in the range of [0, 255].
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
        output_channels: Number of color channels present in the output image.
The output_channels can be 1 or 3. RGB image with shape
(..., height, width, 3) will have the following shapes
after the `Grayscale` operation:
a. (..., height, width, 1) if output_channels = 1
b. (..., height, width, 3) if output_channels = 3.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
to_grayscale = keras_cv.layers.preprocessing.Grayscale()
augmented_images = to_grayscale(images)
```
"""
def __init__(self, output_channels=1, **kwargs):
super().__init__(**kwargs)
self.output_channels = output_channels
# This layer may raise an error when running on GPU using auto_vectorize
self.auto_vectorize = False
def compute_image_signature(self, images):
# required because of the `output_channels` argument
if isinstance(images, tf.RaggedTensor):
ragged_spec = tf.RaggedTensorSpec(
shape=images.shape[1:3] + [self.output_channels],
ragged_rank=1,
dtype=self.compute_dtype,
)
return ragged_spec
return tf.TensorSpec(
images.shape[1:3] + [self.output_channels], self.compute_dtype
)
def _check_input_params(self, output_channels):
if output_channels not in [1, 3]:
raise ValueError(
"Received invalid argument output_channels. "
f"output_channels must be in 1 or 3. Got {output_channels}"
)
self.output_channels = output_channels
def augment_image(self, image, transformation=None, **kwargs):
grayscale = tf.image.rgb_to_grayscale(image)
if self.output_channels == 1:
return grayscale
elif self.output_channels == 3:
return tf.image.grayscale_to_rgb(grayscale)
else:
raise ValueError("Unsupported value for `output_channels`.")
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"output_channels": self.output_channels,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(float)
num_images = [1000, 2000, 5000, 10000]
results = {}
for aug in [Grayscale, OldGrayscale]:
c = aug.__name__
layer = aug()
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
c = aug.__name__ + " Graph Mode"
layer = aug()
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
# So we can actually see more relevant margins
del results["OldGrayscale"]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.show()
| keras-cv/benchmarks/vectorized_grayscale.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_grayscale.py",
"repo_id": "keras-cv",
"token_count": 2285
} | 1 |
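The eager and graph timing loops above repeat the same warmup-then-time pattern; a small helper like the sketch below (not part of the benchmark script) could factor it out:

```python
import time


def time_layer(apply_fn, data, num_images):
    """Times apply_fn over growing slices of data, with one warmup call each."""
    runtimes = []
    for n in num_images:
        apply_fn(data[:n])  # warmup (and graph tracing, if applicable)
        start = time.time()
        apply_fn(data[:n])
        runtimes.append(time.time() - start)
    return runtimes


# e.g. time_layer(Grayscale(), x_train, [1000, 2000, 5000, 10000])
```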
import time
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
from tensorflow import keras
from keras_cv.layers import BaseImageAugmentationLayer
from keras_cv.layers import Solarization
from keras_cv.utils import preprocessing
class OldSolarization(BaseImageAugmentationLayer):
def __init__(
self,
value_range,
addition_factor=0.0,
threshold_factor=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.seed = seed
self.addition_factor = preprocessing.parse_factor(
addition_factor,
max_value=255,
seed=seed,
param_name="addition_factor",
)
self.threshold_factor = preprocessing.parse_factor(
threshold_factor,
max_value=255,
seed=seed,
param_name="threshold_factor",
)
self.value_range = value_range
def get_random_transformation(self, **kwargs):
return (
self.addition_factor(dtype=self.compute_dtype),
self.threshold_factor(dtype=self.compute_dtype),
)
def augment_image(self, image, transformation=None, **kwargs):
(addition, threshold) = transformation
image = preprocessing.transform_value_range(
image,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
result = image + addition
result = tf.clip_by_value(result, 0, 255)
result = tf.where(result < threshold, result, 255 - result)
result = preprocessing.transform_value_range(
result,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def get_config(self):
config = {
"threshold_factor": self.threshold_factor,
"addition_factor": self.addition_factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["threshold_factor"], dict):
config["threshold_factor"] = keras.utils.deserialize_keras_object(
config["threshold_factor"]
)
if isinstance(config["addition_factor"], dict):
config["addition_factor"] = keras.utils.deserialize_keras_object(
config["addition_factor"]
)
return cls(**config)
class SolarizationTest(tf.test.TestCase):
def test_consistency_with_old_implementation(self):
images = tf.random.uniform(shape=(16, 32, 32, 3))
output = Solarization(
value_range=(0, 1),
threshold_factor=(200, 200),
addition_factor=(100, 100),
)(images)
old_output = OldSolarization(
value_range=(0, 1),
threshold_factor=(200, 200),
addition_factor=(100, 100),
)(images)
self.assertAllClose(old_output, output)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [1000, 2000, 5000, 10000]
results = {}
aug_candidates = [Solarization, OldSolarization]
aug_args = {"value_range": (0, 255), "threshold_factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
c = aug.__name__ + " XLA Mode"
layer = aug(**aug_args)
@tf.function(jit_compile=True)
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_solarization.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_solarization.py",
"repo_id": "keras-cv",
"token_count": 2943
} | 2 |
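For completeness, the vectorized layer under benchmark can be applied directly; a short usage sketch mirroring the consistency test above:

```python
import tensorflow as tf

from keras_cv.layers import Solarization

images = tf.random.uniform((16, 32, 32, 3), maxval=255.0)
layer = Solarization(
    value_range=(0, 255),
    addition_factor=(100, 100),
    threshold_factor=(200, 200),
)
augmented = layer(images)
```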