Noah-Wang committed on
Commit
1284b56
1 Parent(s): e777148

Initialize public

Browse files
AIArtDetector.pth-af59f7fa.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40eb1aef6b5d269dfaaa500534552add0dbb847c24f18fae6f6c2a32ae859b32
3
+ size 134
README.md CHANGED
@@ -1,3 +1,9 @@
1
- ---
2
- license: unknown
3
- ---
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ language:
4
+ - en
5
+ metrics:
6
+ - accuracy
7
+ library_name: timm
8
+ pipeline_tag: image-classification
9
+ ---
config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "timm_backbone",
3
+ "architecture": "eva02_base_patch14_448",
4
+ "num_classes": 3,
5
+ "num_features": 768,
6
+ "global_pool": "avg",
7
+ "pretrained_cfg": {
8
+ "tag": "mim_in22k_ft_in22k_in1k",
9
+ "custom_load": false,
10
+ "input_size": [
11
+ 3,
12
+ 448,
13
+ 448
14
+ ],
15
+ "fixed_input_size": true,
16
+ "interpolation": "bicubic",
17
+ "crop_pct": 1.0,
18
+ "crop_mode": "squash",
19
+ "mean": [
20
+ 0.48145466,
21
+ 0.4578275,
22
+ 0.40821073
23
+ ],
24
+ "std": [
25
+ 0.26862954,
26
+ 0.26130258,
27
+ 0.27577711
28
+ ],
29
+ "num_classes": 3,
30
+ "pool_size": null,
31
+ "first_conv": "patch_embed.proj",
32
+ "classifier": "head",
33
+ "license": "mit"
34
+ }
35
+ }
gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
handler.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Any
2
+ import timm
3
+ import torch
4
+ from timm.utils import ParseKwargs
5
+ from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
6
+
7
class EndpointHandler():
    """Hugging Face Inference Endpoints handler for the AI-art detector.

    Loads an `eva02_base_patch14_448` timm backbone fine-tuned for 3-way
    classification from the checkpoint shipped with this repository, and
    exposes a callable that preprocesses one input image and returns the
    raw model logits.
    """

    def __init__(self, path=""):
        """Load the model and build the matching eval-time preprocessing.

        Args:
            path: repository root containing the checkpoint file
                  ``AIArtDetector.pth-af59f7fa.pth``.
        """
        # Prefer GPU when one is available on the endpoint.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        aiArtModel = timm.create_model(
            'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k',
            num_classes=3,
            in_chans=3,
            checkpoint_path=path + '/AIArtDetector.pth-af59f7fa.pth',
        )
        # BUG FIX: the original kept the model only in a local variable,
        # but __call__ reads self.aiArtModel, so every request raised
        # AttributeError. Store it on the instance.
        self.aiArtModel = aiArtModel.to(self.device)
        self.aiArtModel.eval()

        # Eval-time preprocessing matching the model's pretrained config
        # (448x448 input, bicubic resize, crop_pct=1.0 with "squash" mode).
        # Augmentation-related kwargs are ignored when is_training=False,
        # so they are omitted here.
        self.transform = timm.data.create_transform(
            input_size=(3, 448, 448),
            is_training=False,
            interpolation='bicubic',
            crop_pct=1.0,
            crop_mode='squash',
        )

    def __call__(self, data):
        """Run one inference.

        Args:
            data: request payload; the image is taken from ``data["inputs"]``
                  when that key exists, otherwise ``data`` itself is used
                  (presumably a PIL image — the transform expects one).

        Returns:
            The raw logits tensor of shape (1, 3); serialized by the
            endpoint runtime.
        """
        # Accept both {"inputs": image} payloads and a bare image.
        image = data.pop("inputs", data)

        image_tensor = self.transform(image).to(self.device)
        # Inference only — no gradients needed.
        with torch.no_grad():
            logits = self.aiArtModel(image_tensor.unsqueeze(0))

        return logits
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af59f7fa807b4fa8c6fb7177c700b0a0aebb81f16bd0ba96828c7c95d8843a90
3
+ size 345475737
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ torch
2
+ timm