artfan123 committed
Commit 539f5c1 · 1 Parent(s): c073f85

Upload 11 files

Files changed (11)
  1. 105.jpg +0 -0
  2. 24.jpg +0 -0
  3. 339.jpg +0 -0
  4. 344.jpg +0 -0
  5. 50.jpg +0 -0
  6. README.md +3 -3
  7. app.py +51 -0
  8. gitattributes.txt +35 -0
  9. requirements.txt +5 -0
  10. resnet18_binary_prediction.pth +3 -0
  11. resnet34_binary_prediction.pth +3 -0
105.jpg ADDED
24.jpg ADDED
339.jpg ADDED
344.jpg ADDED
50.jpg ADDED
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
 title: AI Generated Art Classifier
-emoji: 👀
-colorFrom: blue
+emoji: 🌖
+colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 3.50.2
+sdk_version: 3.36.1
 app_file: app.py
 pinned: false
 ---
app.py ADDED
@@ -0,0 +1,51 @@
+import gradio as gr
+
+from transformers import ImageClassificationPipeline, AutoImageProcessor, AutoModelForImageClassification, ResNetForImageClassification
+#
+#
+import torch
+
+from transformers import pipeline
+
+feature_extractor = AutoImageProcessor.from_pretrained("artfan123/resnet-18-finetuned-ai-art")
+model = AutoModelForImageClassification.from_pretrained("artfan123/resnet-18-finetuned-ai-art")
+
+image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)
+
+def classify_image(image):
+    results = image_pipe(image)
+    # convert to format Gradio expects
+    output = {}
+    for prediction in results:
+        predicted_label = prediction['label']
+        score = prediction['score']
+        output[predicted_label] = score
+    return output
+
+image = gr.inputs.Image(type="pil")
+label = gr.outputs.Label(num_top_classes=2)
+examples = [['50.jpg'], ['344.jpg'], ['24.jpg'], ['339.jpg'], ['105.jpg']]
+title = "AI Art Detector"
+description = "A deep learning model that detects whether an image is AI generated or human made. Upload image or use the example images below."
+gr.Interface(fn=classify_image, inputs=image, outputs=label, title=title, description=description, examples=examples, enable_queue=True).launch(debug=True)
+
+# if __name__ == "__main__":
+#     with gr.Blocks() as demo:
+
+#         with gr.Row():
+#             with gr.Column(scale=4.5):
+#                 with gr.Group():
+#                     image_prompt = gr.Image(type='pil', shape=[512,512], label="Input Image")
+#                     gr.Examples(inputs=image_prompt, examples=[['50.jpg'], ['344.jpg'], ['24.jpg'], ['339.jpg'], ['105.jpg']])
+#                     with gr.Row():
+#                         clear_button = gr.Button('Clear')
+#                         run_button = gr.Button('Predict')
+
+#             with gr.Column(scale=5.5):
+#                 image_output = gr.Image(type='pil', shape=[512,512], label="Prediction")
+#                 clear_button.click(lambda: None, None, image_prompt, queue=False)
+#                 clear_button.click(lambda: None, None, image_output, queue=False)
+
+#                 run_button.click(fn=segment, inputs=[image_prompt],
+#                                  outputs=[image_output])
+#     demo.queue().launch(share=True)
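For reference, the same classifier can be exercised outside the Gradio UI. A minimal sketch, assuming only that the public artfan123/resnet-18-finetuned-ai-art checkpoint referenced in app.py is reachable on the Hugging Face Hub and that the example image 50.jpg from this commit is in the working directory:

# Minimal sketch: run the Hub model that app.py wraps, without the Gradio interface.
# Assumes 50.jpg (one of the example images in this commit) is present locally.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="artfan123/resnet-18-finetuned-ai-art",
)

predictions = classifier("50.jpg")  # list of {"label": ..., "score": ...} dicts
for prediction in predictions:
    print(f"{prediction['label']}: {prediction['score']:.3f}")

This is equivalent to what classify_image does in app.py, since ImageClassificationPipeline returns the same label/score dictionaries that the Gradio Label component consumes.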
gitattributes.txt ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,5 @@
+torch
+transformers
+pillow
+pip>=23.2
+gradio_client==0.2.7
resnet18_binary_prediction.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:843d8c350ba4722e8ce7a0be74f091b3c7dec33632f976a20de974f08228b8ef
+size 45052303
resnet34_binary_prediction.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97747af36e73906f4491912661553dfee265362586d8baa4d5ecd47d96dc29a0
+size 85548687
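The two .pth files are stored as Git LFS pointers, so only their hashes and sizes appear in the diff; app.py does not load them. If one wanted to use them locally, a hedged sketch follows. It assumes, which this commit does not confirm, that each file is a plain PyTorch state dict for the corresponding torchvision ResNet with a 2-class head, as the file names suggest:

# Hedged sketch only: the commit does not show how these checkpoints were saved.
# Assumption: resnet18_binary_prediction.pth is a torchvision ResNet-18 state dict
# whose final fully connected layer was replaced with a 2-class (binary) head.
import torch
import torch.nn as nn
from torchvision import models

model = models.resnet18(weights=None)          # architecture only, no pretrained weights
model.fc = nn.Linear(model.fc.in_features, 2)  # binary head: AI-generated vs. human-made

state_dict = torch.load("resnet18_binary_prediction.pth", map_location="cpu")
model.load_state_dict(state_dict)              # fails loudly if the assumption is wrong
model.eval()

If load_state_dict reports missing or unexpected keys, the checkpoint was saved in a different form (for example as a full pickled model or a training dict with extra keys), and the object returned by torch.load should be inspected instead. The same sketch applies to resnet34_binary_prediction.pth with models.resnet34.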