Update app.py
app.py CHANGED
@@ -314,7 +314,6 @@
 # )
 # iface.launch()
 
-
 import gradio as gr
 import numpy as np
 import cv2 as cv
@@ -403,8 +402,6 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
                 if max_rounded_prediction > 0.5:
                     print("\nWays to dispose of this waste: " + max_label)
                     messages.append({"role": "user", "content": content + " " + max_label})
-                    # messages.append({"role": "user", "content": max_label})
-
                     print("IMAGE messages after appending:", messages)
 
                     header = {
@@ -418,12 +415,12 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
                             "messages": messages,
                             "model": model_llm
                         }).json()
-                        print("RESPONSE TRY",response)
+                        print("RESPONSE TRY", response)
                         reply = response["choices"][0]["message"]["content"]
                         messages.append({"role": "assistant", "content": reply})
                         output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
-                    except:
-                        print("DOESN'T WORK")
+                    except Exception as e:
+                        print("ERROR:", e)
 
                 elif max_rounded_prediction < 0.5:
                     output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
@@ -451,15 +448,18 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
                "Authorization": f"Bearer {auth}"
            }
 
-            response = requests.post(host, headers=headers, json={
-                "messages": messages,
-                "model": model_llm
-            }).json()
+            try:
+                response = requests.post(host, headers=headers, json={
+                    "messages": messages,
+                    "model": model_llm
+                }).json()
 
-            reply = response["choices"][0]["message"]["content"]
-            messages.append({"role": "assistant", "content": reply})
+                reply = response["choices"][0]["message"]["content"]
+                messages.append({"role": "assistant", "content": reply})
 
-            output.append({"Mode": "Chat", "content": reply})
+                output.append({"Mode": "Chat", "content": reply})
+            except Exception as e:
+                print("ERROR:", e)
 
             return output
     else:
@@ -481,6 +481,173 @@ iface = gr.Interface(
 )
 iface.launch()
 
+
+###### import gradio as gr
+# import numpy as np
+# import cv2 as cv
+# import requests
+# import io
+# from PIL import Image
+# import os
+# import tensorflow as tf
+# import random
+
+# host = os.environ.get("host")
+# code = os.environ.get("code")
+# model_llm = os.environ.get("model")
+# content = os.environ.get("content")
+# state = os.environ.get("state")
+# system = os.environ.get("system")
+# auth = os.environ.get("auth")
+# auth2 = os.environ.get("auth2")
+# data = None
+
+# np.set_printoptions(suppress=True)
+
+# model = tf.keras.models.load_model('keras_model.h5')
+# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+
+# with open("labels.txt", "r") as file:
+#     labels = file.read().splitlines()
+
+# messages = [
+#     {"role": "system", "content": system}
+# ]
+
+# def classify(platform, UserInput, Images, Textbox2, Textbox3):
+#     if Textbox3 == code:
+#         imageData = None
+#         if Images != "None":
+#             output = []
+#             headers = {
+#                 "Authorization": f"Bearer {auth2}"
+#             }
+#             if platform == "wh":
+#                 get_image = requests.get(Images, headers=headers)
+#                 if get_image.status_code == 200:
+#                     image_data = get_image.content
+#             elif platform == "web":
+#                 print("WEB")
+#             else:
+#                 pass
+
+#             image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
+#             image = cv.resize(image, (224, 224))
+#             image_array = np.asarray(image)
+#             normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+#             data[0] = normalized_image_array
+
+#             prediction = model.predict(data)
+
+#             max_label_index = None
+#             max_prediction_value = -1
+
+#             print('Prediction')
+
+#             Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+#             Textbox2 = Textbox2.split(",")
+#             Textbox2_edited = [x.strip() for x in Textbox2]
+#             Textbox2_edited = list(Textbox2_edited)
+#             Textbox2_edited.append(UserInput)
+#             print(UserInput)
+#             print("appending")
+#             messages.append({"role": "user", "content": UserInput})
+
+#             for i, label in enumerate(labels):
+#                 prediction_value = float(prediction[0][i])
+#                 rounded_value = round(prediction_value, 2)
+#                 print(f'{label}: {rounded_value}')
+
+#                 if prediction_value > max_prediction_value:
+#                     max_label_index = i
+#                     max_prediction_value = prediction_value
+
+#             if max_label_index is not None:
+#                 max_label = labels[max_label_index].split(' ', 1)[1]
+#                 max_rounded_prediction = round(max_prediction_value, 2)
+#                 print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
+
+#                 if max_rounded_prediction > 0.5:
+#                     print("\nWays to dispose of this waste: " + max_label)
+#                     messages.append({"role": "user", "content": content + " " + max_label})
+#                     # messages.append({"role": "user", "content": max_label})
+
+#                     print("IMAGE messages after appending:", messages)
+
+#                     header = {
+#                         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+#                         "Content-Type": "application/json",
+#                         "Authorization": f"Bearer {auth}"
+#                     }
+
+#                     try:
+#                         response = requests.post(host, headers=header, json={
+#                             "messages": messages,
+#                             "model": model_llm
+#                         }).json()
+#                         print("RESPONSE TRY",response)
+#                         reply = response["choices"][0]["message"]["content"]
+#                         messages.append({"role": "assistant", "content": reply})
+#                         output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
+#                     except:
+#                         print("DOESN'T WORK")
+
+#                 elif max_rounded_prediction < 0.5:
+#                     output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
+
+#             return output
+
+#         elif Images == "None":
+#             output = []
+
+#             Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+#             Textbox2 = Textbox2.split(",")
+#             Textbox2_edited = [x.strip() for x in Textbox2]
+#             Textbox2_edited = list(Textbox2_edited)
+#             Textbox2_edited.append(UserInput)
+
+#             for i in Textbox2_edited:
+#                 messages.append({"role": "user", "content": i})
+
+#             print("messages after appending:", messages)
+
+#             messages.append({"role": "user", "content": UserInput})
+
+#             headers = {
+#                 "Content-Type": "application/json",
+#                 "Authorization": f"Bearer {auth}"
+#             }
+
+#             response = requests.post(host, headers=headers, json={
+#                 "messages": messages,
+#                 "model": model_llm
+#             }).json()
+
+#             reply = response["choices"][0]["message"]["content"]
+#             messages.append({"role": "assistant", "content": reply})
+
+#             output.append({"Mode": "Chat", "content": reply})
+
+#             return output
+#     else:
+#         return "Unauthorized"
+
+# user_inputs = [
+#     gr.Textbox(label="Platform", type="text"),
+#     gr.Textbox(label="User Input", type="text"),
+#     gr.Textbox(label="Image", type="text"),
+#     gr.Textbox(label="Textbox2", type="text"),
+#     gr.Textbox(label="Textbox3", type="password")
+# ]
+
+# iface = gr.Interface(
+#     fn=classify,
+#     inputs=user_inputs,
+#     outputs=gr.outputs.JSON(),
+#     title="Classifier",
+# )
+# iface.launch()
+
 # import gradio as gr
 # import numpy as np
 # import cv2 as cv
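
For reference, below is a standalone sketch of the error-handling pattern this commit applies to the chat-completion request. It is an illustration only, not part of app.py: host, auth, and model are read from environment variables exactly as in app.py, while the sample messages list and the final print(output) are added here for demonstration.

# Minimal sketch (assumption: an OpenAI-style chat-completion endpoint at `host`):
# wrap the POST in try/except so a failed request or an unexpected response
# shape is logged instead of crashing the Gradio Space.
import os
import requests

host = os.environ.get("host")        # chat-completion endpoint URL, as in app.py
auth = os.environ.get("auth")        # bearer token, as in app.py
model_llm = os.environ.get("model")  # model identifier, as in app.py

# Illustrative conversation; app.py builds this from the Gradio inputs.
messages = [{"role": "user", "content": "How should I dispose of glass waste?"}]
output = []

headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {auth}",
}

try:
    response = requests.post(host, headers=headers, json={
        "messages": messages,
        "model": model_llm,
    }).json()
    reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": reply})
    output.append({"Mode": "Chat", "content": reply})
except Exception as e:
    # Same behavior as the commit: log the error and fall through,
    # leaving output without a reply entry instead of raising.
    print("ERROR:", e)

print(output)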