yukiapple323 committed on
Commit
1f64ced
1 Parent(s): 8c34d2d

Update index.html

Browse files
Files changed (1) hide show
  1. index.html +98 -82
index.html CHANGED
@@ -1,85 +1,101 @@
1
  <!DOCTYPE html>
2
- <html>
3
- <head>
4
- <meta charset="utf-8">
5
- <meta name="viewport" content="width=device-width, initial-scale=1">
6
- <title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
7
- <meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
10
- <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />
11
-
12
- <style>
13
- html, body {
14
- margin: 0;
15
- padding: 0;
16
- height: 100%;
17
  }
18
- </style>
19
- </head>
20
- <body>
21
- <gradio-lite>
22
- <gradio-file name="app.py" entrypoint>
23
- import gradio as gr
24
- from transformers_js_py import pipeline
25
- from filters import convert
26
-
27
- pipe = await pipeline('object-detection', 'Xenova/detr-resnet-50')
28
-
29
- async def fn(image):
30
- result = await pipe(image)
31
- return result
32
-
33
- #demo = gr.Interface.from_pipeline(pipe)
34
-
35
- async def predict(image):
36
- result = await pipe(image)
37
- print(result)
38
- result = convert(result)
39
- print(result)
40
- return image, result
41
-
42
- demo = gr.Interface(
43
- fn=predict,
44
- inputs=gr.Image(type='pil'),
45
- outputs=gr.AnnotatedImage(),
46
- title='On-Device Object-Detection with Gradio-Lite & Transformers.js'
47
- )
48
-
49
- demo.launch()
50
- </gradio-file>
51
-
52
- <gradio-file name="filters.py">
53
- def convert(input_data):
54
- # Initialize the output list
55
- result_labels = []
56
-
57
- # Iterate over each item in the input data
58
- for item in input_data:
59
- # Extract the label
60
- label = item['label']
61
-
62
- # Extract the bounding box coordinates
63
- xmin = item['box']['xmin']
64
- ymin = item['box']['ymin']
65
- xmax = item['box']['xmax']
66
- ymax = item['box']['ymax']
67
-
68
- # Convert coordinates into the required output format (list of coordinates)
69
- coordinates = [xmin, ymin, xmax, ymax]
70
-
71
- # Append the tuple of coordinates and label to the output list
72
- result_labels.append((coordinates, label))
73
-
74
- # Return the output list
75
- return result_labels
76
-
77
- </gradio-file>
78
-
79
- <gradio-requirements>
80
- # Same syntax as requirements.txt
81
- transformers-js-py
82
- </gradio-requirements>
83
- </gradio-lite>
84
- </body>
85
- </html>
 
1
  <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>Candy Label Scanner</title>
7
+ <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
8
+ <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/coco-ssd"></script>
9
+ <script src="https://cdn.jsdelivr.net/npm/tesseract.js"></script>
10
+ <style>
11
+ #output {
12
+ font-size: 20px;
13
+ margin-top: 20px;
14
+ }
15
+ .red {
16
+ color: red;
17
+ }
18
+ .yellow {
19
+ color: yellow;
20
+ }
21
+ .green {
22
+ color: green;
23
+ }
24
+ video {
25
+ width: 100%;
26
+ height: auto;
27
+ }
28
+ </style>
29
+ </head>
30
+ <body>
31
+ <h1>Candy Label Scanner</h1>
32
+ <video id="video" autoplay></video>
33
+ <button id="capture">Capture</button>
34
+ <canvas id="canvas" style="display: none;"></canvas>
35
+ <div id="output"></div>
36
 
37
+ <script>
38
// DOM elements used by the scanner UI.
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const output = document.getElementById('output');
const captureButton = document.getElementById('capture');

// Stream the rear ("environment") camera into the <video> element.
// NOTE(review): `exact` rejects devices with no rear camera (e.g. laptops);
// a soft hint `facingMode: "environment"` would degrade gracefully — confirm
// the intended target devices before relaxing it.
navigator.mediaDevices.getUserMedia({
  video: {
    facingMode: { exact: "environment" }
  }
})
  .then(stream => {
    video.srcObject = stream;
  })
  .catch(err => {
    console.error("Error accessing the camera: ", err);
  });

// On "Capture": snapshot the current video frame and OCR it.
captureButton.addEventListener('click', () => {
  // Draw the video frame to the canvas at its native resolution.
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  const context = canvas.getContext('2d');
  context.drawImage(video, 0, 0, canvas.width, canvas.height);

  // Convert the canvas to a PNG data URL for Tesseract.
  const dataURL = canvas.toDataURL('image/png');

  // Process the image with Tesseract.
  // BUG FIX: Tesseract.recognize takes (image, langs, options). The original
  // passed an extra "image-to-text" string and a Python-style
  // `model="team-lucid/trocr-small-korean"` keyword — in JavaScript that is an
  // assignment to an implicit global `model`, and it pushed the options object
  // (with the logger) into an ignored argument position.
  Tesseract.recognize(dataURL, 'kor', {
    logger: m => console.log(m)
  })
    .then(({ data: { text } }) => {
      console.log(text);
      analyzeNutrition(text);
    })
    .catch(err => {
      // Previously the OCR promise had no rejection handler; surface failures
      // to the user instead of leaving them silent in the console.
      console.error("OCR failed: ", err);
      output.textContent = 'Sugar content not found';
      output.className = '';
    });
});
75
/**
 * Scan OCR text for a Korean nutrition label's sugar line ("당류 : Ng"),
 * classify the sugar content, and show a color-coded verdict in #output.
 * @param {string} text - Raw OCR output from Tesseract.
 */
function analyzeNutrition(text) {
  const outputDiv = document.getElementById('output');

  // "당류" is the Korean label for sugars. This regex might need adjustments
  // based on label format.
  const sugarPattern = /당류\s*:\s*(\d+(\.\d+)?)\s*g\s*/;
  const match = text.match(sugarPattern);

  // Guard clause: no sugar line found in the OCR text.
  if (!match) {
    outputDiv.textContent = 'Sugar content not found';
    outputDiv.className = '';
    return;
  }

  const sugarContent = parseFloat(match[1]);

  // Map grams of sugar to a verdict and a CSS color class.
  let verdict;
  let cssClass;
  if (sugarContent > 20) {
    verdict = 'Dangerous';
    cssClass = 'red';
  } else if (sugarContent > 10) {
    verdict = 'Normal';
    cssClass = 'yellow';
  } else {
    verdict = 'Good';
    cssClass = 'green';
  }

  outputDiv.className = cssClass;
  outputDiv.textContent = `Sugar content: ${sugarContent}g - ${verdict}`;
}
99
+ </script>
100
+ </body>
101
+ </html>