import os

# Install missing dependencies at runtime (e.g. when running as a hosted demo),
# then retry the import.
try:
    import cloudinary
except ImportError:
    os.system('pip install cloudinary')
    import cloudinary
try:
    import detectron2
except ImportError:
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
    import detectron2

import csv
import time

import cv2
import gradio as gr
import numpy as np
import requests
import torch

# Detectron2 utilities
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.data.datasets import register_coco_instances

import cloudinary.uploader

API_KEY = os.environ["API_KEY"]
Cloud_Name = os.environ["Cloud_Name"]
API_Secret = os.environ["API_Secret"]
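# API_KEY, Cloud_Name and API_Secret must be set as environment variables before
# launch; they are used below to upload the measurements CSV to Cloudinary.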

cfg = get_cfg()

register_coco_instances("Fiber", {}, "./labels-fiver.json", "./Fiber")
Fiber_metadata = MetadataCatalog.get("Fiber")
dataset_dicts = DatasetCatalog.get("Fiber")
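# Registering the COCO-format dataset makes its annotations and metadata
# available through DatasetCatalog / MetadataCatalog.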
model_path = "./model_final.pth"

# Load the Mask R-CNN base config, then point it at the trained fiber model.
cfg.merge_from_file("./configs/detectron2/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.WEIGHTS = model_path
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05  # default threshold; overridden per request
cfg.DATASETS.TEST = ("Fiber",)

# Metadata used only to label the visualization.
my_metadata = MetadataCatalog.get("dbmdz_coco_all")
my_metadata.thing_classes = ["Fiber", "Fiber"]
# Fall back to CPU when no GPU is available.
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"
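# Note: a DefaultPredictor is built inside inference() for every request so that
# the score threshold chosen in the UI takes effect.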


def inference(image_url, image, min_score):
    # Prefer the URL input when it is provided; otherwise use the uploaded image.
    if image_url:
        r = requests.get(image_url)
        if r:
            im = np.frombuffer(r.content, dtype="uint8")
            im = cv2.imdecode(im, cv2.IMREAD_COLOR)
    else:
        # Gradio supplies RGB; the model expects BGR.
        im = image[:, :, ::-1]

    # Apply the confidence threshold selected in the UI.
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = min_score
    predictor = DefaultPredictor(cfg)

    outputs = predictor(im)
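    # outputs["instances"] holds the predicted boxes, masks, classes and scores.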

    # Draw the predictions on the RGB image; unsegmented pixels are rendered
    # in black and white (ColorMode.IMAGE_BW).
    v = Visualizer(im[:, :, ::-1],
                   metadata=my_metadata,
                   scale=1,
                   instance_mode=ColorMode.IMAGE_BW)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    # One binary mask per detected instance, shape (num_instances, H, W).
    masks = np.asarray(outputs["instances"].pred_masks.to("cpu"))
    
    # Measure each detected fiber: the tight bounding box of its mask and a
    # size estimate derived from the mask area (pixel count / 600, rounded).
    measurements = {}
    for ind, item_mask in enumerate(masks):
        # True bounding box of the mask (not the predicted bbox).
        segmentation = np.where(item_mask == True)
        x_min = int(np.min(segmentation[1]))
        x_max = int(np.max(segmentation[1]))
        y_min = int(np.min(segmentation[0]))
        y_max = int(np.max(segmentation[0]))
        measurement = int(0.5 + len(segmentation[0]) / 600)
        measurements[ind] = {'measurement': measurement,
                             'x_min': x_min, 'x_max': x_max,
                             'y_min': y_min, 'y_max': y_max}
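    # measurements maps instance index -> {measurement, x_min, x_max, y_min, y_max};
    # these rows are written to the CSV uploaded below.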
    cloudinary.config(
        cloud_name=Cloud_Name,
        api_key=API_KEY,
        api_secret=API_Secret,
        secure=True
    )
    
    # Write the measurements to a timestamped CSV file.
    filename = str(time.time()) + '_measurements.csv'
    with open(filename, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['ID', 'Measurement', 'X_Min', 'X_Max', 'Y_Min', 'Y_Max'])
        for id, data in measurements.items():
            writer.writerow([id, data['measurement'], data['x_min'], data['x_max'],
                             data['y_min'], data['y_max']])

    # Read the CSV back as raw bytes for the upload.
    with open(filename, "rb") as f:
        csv_bytes = f.read()
    
    # Upload the CSV to Cloudinary and return its public URL together with the
    # visualization image.
    upload_result = cloudinary.uploader.upload(
        csv_bytes,
        resource_type="raw",
        folder="csv_files",
        public_id=filename,
        overwrite=False
    )

    return upload_result["url"], v.get_image()


title = " fi ber detec tion Model "
description = ""
article = ''

gr.Interface(
    inference,
    inputs=[
        gr.Textbox(label="Image URL",
                   placeholder="https://api.digitale-sammlungen.de/iiif/image/v2/bsb10483966_00008/full/500,/0/default.jpg"),
        gr.Image(type="numpy", label="Input Image"),
        gr.Slider(minimum=0.0, maximum=1.0, value=0.01, label="Minimum score"),
    ],
    outputs=["text", "image"],
    title=title,
    description=description,
    article=article,
    examples=[],
).launch()
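
# To run locally, set the Cloudinary credentials first, for example (assuming
# this script is saved as app.py):
#   export API_KEY=... Cloud_Name=... API_Secret=...
#   python app.py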