import gradio as gr
import tensorflow as tf
from huggingface_hub import from_pretrained_keras
import numpy as np

adamatch_model = from_pretrained_keras("keras-io/adamatch-domain-adaption")
base_model = from_pretrained_keras("johko/wideresnet28-2-mnist")
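# Both checkpoints use the same WideResNet-28-2 architecture: the AdaMatch model was
# trained with semi-supervised domain adaptation (MNIST as source, SVHN as target),
# while the base model was trained on MNIST only and serves as the comparison baseline.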


labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]

def predict_image(image, model):
    # Gradio passes the image as a (32, 32, 3) array; add a batch dimension for the model.
    image = tf.constant(image)
    image = tf.reshape(image, [-1, 32, 32, 3])
    # Class probabilities for the single image in the batch.
    probs = model.predict(image)[0, :]
    top_pred = probs.tolist()
    # Map each digit label to its probability, as expected by gr.outputs.Label.
    return {labels[i]: top_pred[i] for i in range(10)}
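
# Rough usage sketch (hypothetical values): for a 32x32 RGB digit image,
# predict_image(img, adamatch_model) returns something like
# {"0": 0.01, "1": 0.02, ..., "9": 0.85}, which gr.outputs.Label renders directly.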

def infer(mnist_img, svhn_img, model):
    labels_out = []
    for im in [mnist_img, svhn_img]:
        labels_out.append(predict_image(im, model))
    return labels_out
    
def infer_ada(mnist_image, svhn_image):
    return infer(mnist_image, svhn_image, adamatch_model)

def infer_base(mnist_image, svhn_image):
    return infer(mnist_image, svhn_image, base_model)
    

def infer_all(mnist_image, svhn_image):
    base_res = infer_base(mnist_image, svhn_image)
    ada_res = infer_ada(mnist_image, svhn_image)
    # list.extend returns None, so concatenate the result lists instead.
    return base_res + ada_res
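
# Note: infer_all is not wired to any interface below; gr.Parallel combines the two
# single-model interfaces instead.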

article = """<center>

Authors: <a href='https://twitter.com/johko990' target='_blank'>Johannes Kolbe</a> based on an example by [Sayak Paul](https://twitter.com/RisingSayak) on
<a href='https://keras.io/examples/vision/adamatch/' target='_blank'>**keras.io**</a>"""



description = """<center> 

This space lets you compare image classification results of identical architecture (WideResNet-2-28) models. The training of one of the models was improved 
by using AdaMatch as seen in the example on [keras.io](https://keras.io/examples/vision/adamatch/).

The base model was only trained on the MNIST dataset and shows a low classification accuracy (8.96%) for a different domain dataset like SVHN. The AdaMatch model 
uses a semi-supervised domain adaption approach to adapt to the SVHN dataset and shows a significantly higher accuracy (26.51%).
"""
mnist_image_base = gr.inputs.Image(shape=(32, 32))
svhn_image_base = gr.inputs.Image(shape=(32, 32))
mnist_image_ada = gr.inputs.Image(shape=(32, 32))
svhn_image_ada = gr.inputs.Image(shape=(32, 32))

label_mnist_base = gr.outputs.Label(num_top_classes=3, label="MNIST Prediction Base")
label_svhn_base = gr.outputs.Label(num_top_classes=3, label="SVHN Prediction Base")
label_mnist_ada = gr.outputs.Label(num_top_classes=3, label="MNIST Prediction AdaMatch")
label_svhn_ada = gr.outputs.Label(num_top_classes=3, label="SVHN Prediction AdaMatch")
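
# Note: gr.inputs.Image / gr.outputs.Label is the legacy Gradio component API;
# on newer Gradio releases the equivalents are typically gr.Image and gr.Label.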
    

base_iface = gr.Interface(
    fn=infer_base,
    inputs=[mnist_image_base, svhn_image_base],
    outputs=[label_mnist_base,label_svhn_base]
)

ada_iface = gr.Interface(
    fn=infer_ada,
    inputs=[mnist_image_ada, svhn_image_ada],
    outputs=[label_mnist_ada,label_svhn_ada]
)
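
# gr.Parallel feeds the same MNIST/SVHN inputs to both interfaces and shows their
# outputs side by side, making it easy to compare the base and AdaMatch predictions.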

gr.Parallel(base_iface, 
            ada_iface, 
            examples=[
              ["examples/mnist_3.jpg", "examples/svhn_3.jpeg"], 
              ["examples/mnist_8.jpg", "examples/svhn_8.jpg"]
            ],     
            title="Semi-Supervised Domain Adaption with AdaMatch",
            article=article,
            description=description,
).launch()