import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from datasets import load_dataset
from huggingface_hub import HfApi
import os
import matplotlib.pyplot as plt

import utils

# Hugging Face Hub credentials
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL_REPO_ID = "louiecerv/amer_sign_lang_data_augmentation"
DATASET_REPO_ID = "louiecerv/american_sign_language"

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
st.write(f"Device: {device}")

# Define the new CNN model
IMG_HEIGHT = 28
IMG_WIDTH = 28
IMG_CHS = 1
N_CLASSES = 24

class MyConvBlock(nn.Module):
    """Conv -> BatchNorm -> ReLU -> Dropout -> 2x2 max-pool (halves H and W)."""

    def __init__(self, in_ch, out_ch, dropout_p):
        super().__init__()
        kernel_size = 3
        self.model = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size, stride=1, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Dropout(dropout_p),
            nn.MaxPool2d(2, stride=2)
        )

    def forward(self, x):
        return self.model(x)

# Three 2x2 max-pools shrink the 28x28 input to 14 -> 7 -> 3 (integer floor),
# leaving 75 channels of 3x3 features after the last block.
flattened_img_size = 75 * 3 * 3

# Input 1 x 28 x 28
base_model = nn.Sequential(
    MyConvBlock(IMG_CHS, 25, 0), # 25 x 14 x 14
    MyConvBlock(25, 50, 0.2), # 50 x 7 x 7
    MyConvBlock(50, 75, 0), # 75 x 3 x 3
    nn.Flatten(),
    nn.Linear(flattened_img_size, 512),
    nn.Dropout(.3),
    nn.ReLU(),
    nn.Linear(512, N_CLASSES)
)
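
# Sanity check (a minimal sketch): push a dummy batch through the untrained
# network to confirm that flattened_img_size matches the architecture above.
with torch.no_grad():
    base_model.eval()  # avoid touching BatchNorm running statistics
    _probe = base_model(torch.zeros(1, IMG_CHS, IMG_HEIGHT, IMG_WIDTH))
    base_model.train()
assert _probe.shape == (1, N_CLASSES), f"unexpected output shape: {_probe.shape}"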

# Streamlit app
def main():
    st.title("ASL Model Uploader App")
    
    about = """
# American Sign Language Recognition

This project demonstrates a Convolutional Neural Network (CNN) for recognizing American Sign Language (ASL) hand signs. It is built with PyTorch, uses the Hugging Face Hub for dataset and model hosting, and provides a Streamlit user interface. The model is trained on a dataset of ASL images with data augmentation to improve generalization.

## Table of Contents
- Introduction
- Installation
- Usage
- Model Architecture
- Data Augmentation
- Training
- Results
- Acknowledgements

## Introduction
This project aims to build a robust ASL recognition system using deep learning techniques. The model is implemented in PyTorch and utilizes the Hugging Face Hub for dataset management. The Streamlit app provides an interactive interface for training and visualizing the model's performance.

## Installation
To run this project, you need to have Python installed along with the following libraries:
- `streamlit`
- `torch`
- `torchvision`
- `datasets`
- `huggingface_hub`
- `matplotlib`

You can install the required libraries using:
```bash
pip install streamlit torch torchvision datasets huggingface_hub matplotlib
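```

## Usage
Launch the app with Streamlit (assuming this script is saved as `app.py`):
```bash
streamlit run app.py
```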
"""
    with st.expander("About", expanded=True):
        st.markdown(about)

    # Move slider and button to sidebar
    num_epochs = st.sidebar.slider("Number of Epochs", 1, 20, 5)
    train_button = st.sidebar.button("Train Model")

    # Load the dataset from the Hugging Face Hub
    # (runs on every Streamlit rerun; st.cache_resource can avoid repeat loads)
    dataset = load_dataset(DATASET_REPO_ID)

    # Data loaders with preprocessing and data augmentation:
    random_transforms = transforms.Compose([
        transforms.RandomRotation(5),  # rotate up to +/-5 degrees
        transforms.RandomResizedCrop((IMG_WIDTH, IMG_HEIGHT), scale=(.9, 1), ratio=(1, 1)),  # crop 90-100% of the image
        transforms.RandomHorizontalFlip(),  # mirror left/right at random
        transforms.ColorJitter(brightness=.2, contrast=.5),  # vary brightness and contrast
        transforms.Normalize(mean=[0.5], std=[0.5])  # center and scale pixel values
    ])

    def collate_fn(batch):
        # Turn raw dataset rows into an augmented, batched (images, labels) pair.
        images = []
        labels = []
        for item in batch:
            if 'pixel_values' in item and 'label' in item:
                # float32 is required by Normalize and safest for the other transforms
                image = torch.tensor(item['pixel_values'], dtype=torch.float32)
                label = item['label']
                try:
                    image = random_transforms(image)
                    images.append(image)
                    labels.append(label)
                except Exception as e:
                    print(f"Error processing image: {e}")
                    continue

        # Signal an empty batch so the training loop can skip it
        if not images:
            return torch.tensor([]), torch.tensor([])

        images = torch.stack(images).to(device)
        labels = torch.tensor(labels).long().to(device)
        return images, labels

    train_loader = DataLoader(dataset["train"], batch_size=64, shuffle=True, collate_fn=collate_fn)
    val_loader = DataLoader(dataset["validation"], batch_size=64, collate_fn=collate_fn)

    # Model, loss, and optimizer
    model = base_model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    loss_history = []
    accuracy_history = []

    if train_button:
        for epoch in range(num_epochs):
            total = 0
            correct = 0
            epoch_loss = 0
            for i, (images, labels) in enumerate(train_loader):
                if images.nelement() == 0:
                    continue

                # Forward pass
                outputs = model(images)
                loss = criterion(outputs, labels)
                epoch_loss += loss.item()

                # Backward and optimize
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

            epoch_accuracy = 100 * correct / max(total, 1)  # guard against an all-empty epoch
            loss_history.append(epoch_loss / len(train_loader))
            accuracy_history.append(epoch_accuracy)

            st.write(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {epoch_loss / len(train_loader):.4f}, Accuracy: {epoch_accuracy:.2f}%')
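
            # val_loader is created above but never read in the original loop;
            # a minimal per-epoch validation pass (sketch, mirrors the training metric)
            model.eval()
            val_correct, val_total = 0, 0
            with torch.no_grad():
                for images, labels in val_loader:
                    if images.nelement() == 0:
                        continue
                    outputs = model(images)
                    _, predicted = torch.max(outputs.data, 1)
                    val_total += labels.size(0)
                    val_correct += (predicted == labels).sum().item()
            if val_total > 0:
                st.write(f'Validation Accuracy: {100 * val_correct / val_total:.2f}%')
            model.train()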

        # Plot loss and accuracy on a shared x-axis with twin y-axes
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        line1, = ax1.plot(loss_history, 'g-', label='Loss')
        line2, = ax2.plot(accuracy_history, 'b-', label='Accuracy')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss', color='g')
        ax2.set_ylabel('Accuracy (%)', color='b')
        ax1.legend(handles=[line1, line2], loc='best')
        plt.title('Training Loss and Accuracy')
        st.pyplot(fig)
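
        # The app's title promises an uploader, but the original snippet never
        # pushes weights. A minimal sketch, assuming HF_TOKEN grants write
        # access to MODEL_REPO_ID (both are defined at the top of this file).
        if HF_TOKEN:
            model_path = "asl_model.pth"
            torch.save(model.state_dict(), model_path)
            api = HfApi(token=HF_TOKEN)
            api.upload_file(
                path_or_fileobj=model_path,
                path_in_repo="asl_model.pth",
                repo_id=MODEL_REPO_ID,
                repo_type="model",
            )
            st.success(f"Model weights uploaded to {MODEL_REPO_ID}")
        else:
            st.warning("HF_TOKEN not set; skipping model upload.")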

if __name__ == "__main__":
    main()