Commit · 38e5a7e
Parent(s): 801bd57
final CAM layers fixed

- app.py +30 -47
- list_modules.py +61 -0
- modules_SuSy_pt.txt +225 -0
- modules_ai-image-detector-deploy.txt +579 -0
app.py CHANGED
@@ -1,12 +1,12 @@
 # app.py ────────────────────────────────────────────────────────────────
 """
 Two-stage local AI-image detector
-  1. haywoodsloan/ai-image-detector-deploy → Real vs …
-  2. SuSy.pt → Likely generator (…
+  1. haywoodsloan/ai-image-detector-deploy → Real vs AI (Swin-V2)
+  2. SuSy.pt → Likely generator (ResNet-based)
 
-…
-  • …
-  • …
+Includes Grad-CAM overlays:
+  • always show heat-map for binary decision
+  • if image is flagged AI, also show heat-map for SuSy
 """
 
 import gradio as gr
@@ -22,33 +22,30 @@ BIN_ID = "haywoodsloan/ai-image-detector-deploy"
 bin_proc = AutoImageProcessor.from_pretrained(BIN_ID)
 bin_model = AutoModelForImageClassification.from_pretrained(BIN_ID)
 bin_model.eval()
-CAM_LAYER_BIN = "swinv2.layers.3.blocks.1.norm2"   # <-- was *.stages.7.*
+
+CAM_LAYER_BIN = "encoder.layers.3.blocks.1.layernorm_after"   # <- from dump
 
 # ──────────── Stage-2 model (SuSy) ──────────────────────────────────
-susy_model …
+susy_model = torch.jit.load("SuSy.pt").eval()
+CAM_LAYER_SUSY = "feature_extractor.resnet_model.layer4.1.relu"   # <- from dump
+
 GEN_CLASSES = [
     "Stable Diffusion 1.x", "DALL·E 3", "MJ V5/V6",
     "Stable Diffusion XL", "MJ V1/V2",
 ]
-CAM_LAYER_SUSY = "blocks.11"
 PATCH, TOP = 224, 5
 
 # ──────────── Heat-map helper ───────────────────────────────────────
 def grad_cam_overlay(model, inputs, target_layer, class_idx, orig_pil):
-    …
-    …
-    …
-    …
-    …
-    for k, v in (inputs.items() if isinstance(inputs, dict) else {"x": inputs}).items():
-        inputs[k] = v.to(device)
-
-    cam_ex = GradCAM(model, target_layer=target_layer, input_shape=next(iter(inputs.values())).shape)
+    # prepare Grad-CAM extractor
+    cam_ex = GradCAM(model, target_layer=target_layer,
+                     input_shape=next(iter(inputs.values()) if isinstance(inputs, dict) else [inputs]).shape)
+
+    # forward & backward
     scores = model(**inputs).logits if isinstance(inputs, dict) else model(inputs)
-    …
-    score.backward()
+    scores[0, class_idx].backward()
 
+    # normalise cam
     mask = cam_ex(class_idx)[0].cpu().numpy()
     mask = (mask - mask.min()) / (mask.max() - mask.min() + 1e-6)
     mask = Image.fromarray(np.uint8(plt.cm.jet(mask)[:, :, :3] * 255)).resize(orig_pil.size, Image.BICUBIC)
@@ -61,13 +58,12 @@ to_gray_pil = transforms.Compose([transforms.PILToTensor(), transforms.Graysca
 def susy_predict(img: Image.Image) -> dict:
     w, h = img.size
     npx, npy = max(1, w // PATCH), max(1, h // PATCH)
-
     patches = np.zeros((npx * npy, PATCH, PATCH, 3), dtype=np.uint8)
+
     for i in range(npx):
         for j in range(npy):
             x, y = i * PATCH, j * PATCH
-            …
-            patches[i * npy + j] = np.array(crop)
+            patches[i * npy + j] = np.array(img.crop((x, y, x + PATCH, y + PATCH)).resize((PATCH, PATCH)))
 
     contrasts = []
     for p in patches:
@@ -75,7 +71,7 @@ def susy_predict(img: Image.Image) -> dict:
         glcm = graycomatrix(g, [5], [0], 256, symmetric=True, normed=True)
         contrasts.append(graycoprops(glcm, "contrast")[0, 0])
 
-    idx …
+    idx = np.argsort(contrasts)[::-1][:TOP]
     tensor = torch.from_numpy(patches[idx].transpose(0, 3, 1, 2)).float() / 255.0
 
     with torch.no_grad():
@@ -93,11 +89,9 @@ def pipeline(img_arr):
     logits = bin_model(**inp_bin).logits
     probs = torch.softmax(logits, -1)[0].tolist()   # [artificial, real]
 
-    …
-    probs_d = dict(zip(labels, probs))
-    ai_conf, real_conf = probs_d.get("artificial", 0.0), probs_d.get("real", 0.0)
+    ai_conf, real_conf = probs[0], probs[1]
 
-    # Grad-CAM for …
+    # Grad-CAM for winning class
     class_idx = 0 if ai_conf >= real_conf else 1
     heatmaps.append(
         grad_cam_overlay(
@@ -108,23 +102,22 @@ def pipeline(img_arr):
         )
     )
 
-    # …
-    msg …
-    bar_df = None
-    bar_vis = False
+    # defaults
+    msg, bar_df, bar_vis = f"Authentic ({real_conf*100:.1f} %)", None, False
 
-    # Stage-2
+    # Stage-2 if AI
    if ai_conf > real_conf:
         msg = f"AI-generated ({ai_conf*100:.1f} %)"
         gen_probs = susy_predict(img)
         bar_df = pd.DataFrame({"class": gen_probs.keys(), "prob": gen_probs.values()})
         bar_vis = True
 
-        # SuSy heat-map
+        # SuSy heat-map: choose most-probable generator class
         with torch.no_grad():
             t_inp = to_tensor(img.resize((224, 224))).unsqueeze(0)
-            …
-            susy_class = …
+            logits_susy = susy_model(t_inp)
+            susy_class = logits_susy[0, 1:].argmax().item() + 1   # skip 'real'
+
         heatmaps.append(
             grad_cam_overlay(
                 susy_model, t_inp,
@@ -134,12 +127,7 @@ def pipeline(img_arr):
             )
         )
 
-    …
-    return (
-        msg,
-        gr.update(value=bar_df, visible=bar_vis),
-        heatmaps
-    )
+    return msg, gr.update(value=bar_df, visible=bar_vis), heatmaps
 
 # ──────────── Gradio UI ─────────────────────────────────────────────
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
@@ -163,11 +151,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         columns=2, height=300, visible=True
     )
 
-    btn.click(
-        pipeline,
-        inputs=img_in,
-        outputs=[txt_bin, bar_gen, gal_cam]
-    )
+    btn.click(pipeline, inputs=img_in, outputs=[txt_bin, bar_gen, gal_cam])
 
 demo.launch()
-
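Note: the patch-ranking step that susy_predict performs is easy to sanity-check in isolation. Below is a minimal, self-contained sketch of the same idea — score 224×224 patches by GLCM contrast and keep the TOP highest — using skimage's graycomatrix/graycoprops exactly as in the diff. The helper name rank_patches and the random demo image are illustrative only, not part of the app.

    # Standalone sketch of the GLCM contrast ranking used in susy_predict.
    import numpy as np
    from PIL import Image
    from skimage.feature import graycomatrix, graycoprops

    PATCH, TOP = 224, 5

    def rank_patches(img: Image.Image) -> np.ndarray:
        w, h = img.size
        npx, npy = max(1, w // PATCH), max(1, h // PATCH)
        patches = np.zeros((npx * npy, PATCH, PATCH, 3), dtype=np.uint8)
        for i in range(npx):
            for j in range(npy):
                x, y = i * PATCH, j * PATCH
                patches[i * npy + j] = np.array(
                    img.crop((x, y, x + PATCH, y + PATCH)).resize((PATCH, PATCH)))

        contrasts = []
        for p in patches:
            g = np.array(Image.fromarray(p).convert("L"))   # 8-bit grayscale
            glcm = graycomatrix(g, [5], [0], 256, symmetric=True, normed=True)
            contrasts.append(graycoprops(glcm, "contrast")[0, 0])

        idx = np.argsort(contrasts)[::-1][:TOP]             # highest contrast first
        return patches[idx]

    demo_img = Image.fromarray(np.random.randint(0, 255, (672, 672, 3), dtype=np.uint8))
    print(rank_patches(demo_img).shape)                     # (5, 224, 224, 3)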
list_modules.py
ADDED
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+"""
+list_modules.py
+────────────────────────────────────────────
+Print (and optionally save) the dotted names of **all** sub-modules
+inside a PyTorch model.  Handy for locating the correct layer name
+for Grad-CAM, feature hooks, etc.
+
+USAGE
+-----
+edit MODEL_SOURCE and MODEL_TYPE below, then:
+
+    python list_modules.py
+
+Outputs:
+    • console → first `LIMIT` names (to keep logs short)
+    • file    → full list written to `modules_<model>.txt`
+"""
+
+from __future__ import annotations
+import torch, argparse, pathlib, sys
+from transformers import AutoModel
+
+# ────────────── CONFIG ───────────────────────────────────────────────
+MODEL_SOURCE = "haywoodsloan/ai-image-detector-deploy"
+MODEL_TYPE   = "huggingface"
+
+LIMIT = 2000          # how many lines to print to stdout (None = all)
+# ───────────────────────────────────────────────────────────────────────
+
+def load_model(src: str, src_type: str):
+    if src_type == "huggingface":
+        model = AutoModel.from_pretrained(src)
+    elif src_type == "torchscript":
+        model = torch.jit.load(src)
+    else:
+        raise ValueError("MODEL_TYPE must be 'huggingface' or 'torchscript'")
+    model.eval()
+    return model
+
+def dump_module_names(model: torch.nn.Module,
+                      out_file: pathlib.Path,
+                      limit: int | None = None):
+    names = [n for n, _ in model.named_modules()]   # includes root '' at idx 0
+    total = len(names)
+
+    print(f"\n▶ total {total} sub-modules found\n")
+    for idx, name in enumerate(names):
+        if limit is None or idx < limit:
+            print(f"{idx:4d}: {name}")
+
+    out_file.write_text("\n".join(names), encoding="utf-8")
+    print(f"\n▶ wrote full list to {out_file}")
+
+def main():
+    model = load_model(MODEL_SOURCE, MODEL_TYPE)
+    txt_path = pathlib.Path(f"modules_{MODEL_SOURCE.split('/')[-1].replace('.','_')}.txt")
+    dump_module_names(model, txt_path, LIMIT)
+
+if __name__ == "__main__":
+    main()
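Once a dump exists, finding CAM candidates is one filter away. A minimal sketch, assuming the modules_ai-image-detector-deploy.txt file produced by the script above:

    # Filter a module dump for plausible Grad-CAM target layers.
    import pathlib

    names = pathlib.Path("modules_ai-image-detector-deploy.txt").read_text(encoding="utf-8").splitlines()
    for n in names:
        if "layers.3" in n and n.endswith("layernorm_after"):
            print(n)   # e.g. encoder.layers.3.blocks.1.layernorm_after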
modules_SuSy_pt.txt
ADDED
@@ -0,0 +1,225 @@
+
+feature_extractor
+feature_extractor.resnet_model
+feature_extractor.resnet_model.conv1
+feature_extractor.resnet_model.bn1
+feature_extractor.resnet_model.relu
+feature_extractor.resnet_model.maxpool
+feature_extractor.resnet_model.layer1
+feature_extractor.resnet_model.layer1.0
+feature_extractor.resnet_model.layer1.0.conv1
+feature_extractor.resnet_model.layer1.0.bn1
+feature_extractor.resnet_model.layer1.0.relu
+feature_extractor.resnet_model.layer1.0.conv2
+feature_extractor.resnet_model.layer1.0.bn2
+feature_extractor.resnet_model.layer1.1
+feature_extractor.resnet_model.layer1.1.conv1
+feature_extractor.resnet_model.layer1.1.bn1
+feature_extractor.resnet_model.layer1.1.relu
+feature_extractor.resnet_model.layer1.1.conv2
+feature_extractor.resnet_model.layer1.1.bn2
+feature_extractor.resnet_model.layer2
+feature_extractor.resnet_model.layer2.0
+feature_extractor.resnet_model.layer2.0.conv1
+feature_extractor.resnet_model.layer2.0.bn1
+feature_extractor.resnet_model.layer2.0.relu
+feature_extractor.resnet_model.layer2.0.conv2
+feature_extractor.resnet_model.layer2.0.bn2
+feature_extractor.resnet_model.layer2.0.downsample
+feature_extractor.resnet_model.layer2.0.downsample.0
+feature_extractor.resnet_model.layer2.0.downsample.1
+feature_extractor.resnet_model.layer2.1
+feature_extractor.resnet_model.layer2.1.conv1
+feature_extractor.resnet_model.layer2.1.bn1
+feature_extractor.resnet_model.layer2.1.relu
+feature_extractor.resnet_model.layer2.1.conv2
+feature_extractor.resnet_model.layer2.1.bn2
+feature_extractor.resnet_model.layer3
+feature_extractor.resnet_model.layer3.0
+feature_extractor.resnet_model.layer3.0.conv1
+feature_extractor.resnet_model.layer3.0.bn1
+feature_extractor.resnet_model.layer3.0.relu
+feature_extractor.resnet_model.layer3.0.conv2
+feature_extractor.resnet_model.layer3.0.bn2
+feature_extractor.resnet_model.layer3.0.downsample
+feature_extractor.resnet_model.layer3.0.downsample.0
+feature_extractor.resnet_model.layer3.0.downsample.1
+feature_extractor.resnet_model.layer3.1
+feature_extractor.resnet_model.layer3.1.conv1
+feature_extractor.resnet_model.layer3.1.bn1
+feature_extractor.resnet_model.layer3.1.relu
+feature_extractor.resnet_model.layer3.1.conv2
+feature_extractor.resnet_model.layer3.1.bn2
+feature_extractor.resnet_model.layer4
+feature_extractor.resnet_model.layer4.0
+feature_extractor.resnet_model.layer4.0.conv1
+feature_extractor.resnet_model.layer4.0.bn1
+feature_extractor.resnet_model.layer4.0.relu
+feature_extractor.resnet_model.layer4.0.conv2
+feature_extractor.resnet_model.layer4.0.bn2
+feature_extractor.resnet_model.layer4.0.downsample
+feature_extractor.resnet_model.layer4.0.downsample.0
+feature_extractor.resnet_model.layer4.0.downsample.1
+feature_extractor.resnet_model.layer4.1
+feature_extractor.resnet_model.layer4.1.conv1
+feature_extractor.resnet_model.layer4.1.bn1
+feature_extractor.resnet_model.layer4.1.relu
+feature_extractor.resnet_model.layer4.1.conv2
+feature_extractor.resnet_model.layer4.1.bn2
+feature_extractor.resnet_model.avgpool
+feature_extractor.resnet_model.fc
+feature_extractor.stages
+feature_extractor.stages.0
+feature_extractor.stages.0.0
+feature_extractor.stages.0.1
+feature_extractor.stages.0.2
+feature_extractor.stages.0.3
+feature_extractor.stages.1
+feature_extractor.stages.1.0
+feature_extractor.stages.1.0.conv1
+feature_extractor.stages.1.0.bn1
+feature_extractor.stages.1.0.relu
+feature_extractor.stages.1.0.conv2
+feature_extractor.stages.1.0.bn2
+feature_extractor.stages.1.1
+feature_extractor.stages.1.1.conv1
+feature_extractor.stages.1.1.bn1
+feature_extractor.stages.1.1.relu
+feature_extractor.stages.1.1.conv2
+feature_extractor.stages.1.1.bn2
+feature_extractor.stages.2
+feature_extractor.stages.2.0
+feature_extractor.stages.2.0.conv1
+feature_extractor.stages.2.0.bn1
+feature_extractor.stages.2.0.relu
+feature_extractor.stages.2.0.conv2
+feature_extractor.stages.2.0.bn2
+feature_extractor.stages.2.0.downsample
+feature_extractor.stages.2.0.downsample.0
+feature_extractor.stages.2.0.downsample.1
+feature_extractor.stages.2.1
+feature_extractor.stages.2.1.conv1
+feature_extractor.stages.2.1.bn1
+feature_extractor.stages.2.1.relu
+feature_extractor.stages.2.1.conv2
+feature_extractor.stages.2.1.bn2
+feature_extractor.stages.3
+feature_extractor.stages.3.0
+feature_extractor.stages.3.0.conv1
+feature_extractor.stages.3.0.bn1
+feature_extractor.stages.3.0.relu
+feature_extractor.stages.3.0.conv2
+feature_extractor.stages.3.0.bn2
+feature_extractor.stages.3.0.downsample
+feature_extractor.stages.3.0.downsample.0
+feature_extractor.stages.3.0.downsample.1
+feature_extractor.stages.3.1
+feature_extractor.stages.3.1.conv1
+feature_extractor.stages.3.1.bn1
+feature_extractor.stages.3.1.relu
+feature_extractor.stages.3.1.conv2
+feature_extractor.stages.3.1.bn2
+feature_extractor.stages.4
+feature_extractor.stages.4.0
+feature_extractor.stages.4.0.conv1
+feature_extractor.stages.4.0.bn1
+feature_extractor.stages.4.0.relu
+feature_extractor.stages.4.0.conv2
+feature_extractor.stages.4.0.bn2
+feature_extractor.stages.4.0.downsample
+feature_extractor.stages.4.0.downsample.0
+feature_extractor.stages.4.0.downsample.1
+feature_extractor.stages.4.1
+feature_extractor.stages.4.1.conv1
+feature_extractor.stages.4.1.bn1
+feature_extractor.stages.4.1.relu
+feature_extractor.stages.4.1.conv2
+feature_extractor.stages.4.1.bn2
+feature_extractor.f11
+feature_extractor.f11.0
+feature_extractor.f11.1
+feature_extractor.f11.2
+feature_extractor.f12
+feature_extractor.f12.0
+feature_extractor.f12.1
+feature_extractor.f12.2
+feature_extractor.f22
+feature_extractor.f22.0
+feature_extractor.f22.1
+feature_extractor.f22.2
+feature_extractor.f13
+feature_extractor.f13.0
+feature_extractor.f13.1
+feature_extractor.f13.2
+feature_extractor.f23
+feature_extractor.f23.0
+feature_extractor.f23.1
+feature_extractor.f23.2
+feature_extractor.f33
+feature_extractor.f33.0
+feature_extractor.f33.1
+feature_extractor.f33.2
+feature_extractor.f14
+feature_extractor.f14.0
+feature_extractor.f14.1
+feature_extractor.f14.2
+feature_extractor.f24
+feature_extractor.f24.0
+feature_extractor.f24.1
+feature_extractor.f24.2
+feature_extractor.f34
+feature_extractor.f34.0
+feature_extractor.f34.1
+feature_extractor.f34.2
+feature_extractor.f44
+feature_extractor.f44.0
+feature_extractor.f44.1
+feature_extractor.f44.2
+feature_extractor.bottlenecks
+feature_extractor.bottlenecks.0
+feature_extractor.bottlenecks.0.0
+feature_extractor.bottlenecks.0.1
+feature_extractor.bottlenecks.0.2
+feature_extractor.bottlenecks.1
+feature_extractor.bottlenecks.1.0
+feature_extractor.bottlenecks.1.1
+feature_extractor.bottlenecks.1.2
+feature_extractor.bottlenecks.2
+feature_extractor.bottlenecks.2.0
+feature_extractor.bottlenecks.2.1
+feature_extractor.bottlenecks.2.2
+feature_extractor.bottlenecks.3
+feature_extractor.bottlenecks.3.0
+feature_extractor.bottlenecks.3.1
+feature_extractor.bottlenecks.3.2
+feature_extractor.bottlenecks.4
+feature_extractor.bottlenecks.4.0
+feature_extractor.bottlenecks.4.1
+feature_extractor.bottlenecks.4.2
+feature_extractor.bottlenecks.5
+feature_extractor.bottlenecks.5.0
+feature_extractor.bottlenecks.5.1
+feature_extractor.bottlenecks.5.2
+feature_extractor.bottlenecks.6
+feature_extractor.bottlenecks.6.0
+feature_extractor.bottlenecks.6.1
+feature_extractor.bottlenecks.6.2
+feature_extractor.bottlenecks.7
+feature_extractor.bottlenecks.7.0
+feature_extractor.bottlenecks.7.1
+feature_extractor.bottlenecks.7.2
+feature_extractor.bottlenecks.8
+feature_extractor.bottlenecks.8.0
+feature_extractor.bottlenecks.8.1
+feature_extractor.bottlenecks.8.2
+feature_extractor.bottlenecks.9
+feature_extractor.bottlenecks.9.0
+feature_extractor.bottlenecks.9.1
+feature_extractor.bottlenecks.9.2
+feature_extractor.gap
+mlp
+mlp.fc1
+mlp.fc2
+mlp.fc3
+mlp.dropout
+softmax
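This dump is why CAM_LAYER_SUSY in app.py points at feature_extractor.resnet_model.layer4.1.relu, the last spatial activation of the ResNet branch. Before wiring a dumped name into Grad-CAM, it is worth checking that it actually resolves on the loaded model. A minimal sketch, assuming SuSy.pt is present; dict(named_modules()) generally works for TorchScript models too, but treat that as an assumption to verify on your PyTorch version:

    # Confirm a dumped dotted name resolves to a real sub-module.
    import torch

    model = torch.jit.load("SuSy.pt").eval()
    modules = dict(model.named_modules())      # name -> module mapping
    target = "feature_extractor.resnet_model.layer4.1.relu"
    print(target in modules, type(modules.get(target)))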
modules_ai-image-detector-deploy.txt
ADDED
@@ -0,0 +1,579 @@
+
+embeddings
+embeddings.patch_embeddings
+embeddings.patch_embeddings.projection
+embeddings.norm
+embeddings.dropout
+encoder
+encoder.layers
+encoder.layers.0
+encoder.layers.0.blocks
+encoder.layers.0.blocks.0
+encoder.layers.0.blocks.0.attention
+encoder.layers.0.blocks.0.attention.self
+encoder.layers.0.blocks.0.attention.self.continuous_position_bias_mlp
+encoder.layers.0.blocks.0.attention.self.continuous_position_bias_mlp.0
+encoder.layers.0.blocks.0.attention.self.continuous_position_bias_mlp.1
+encoder.layers.0.blocks.0.attention.self.continuous_position_bias_mlp.2
+encoder.layers.0.blocks.0.attention.self.query
+encoder.layers.0.blocks.0.attention.self.key
+encoder.layers.0.blocks.0.attention.self.value
+encoder.layers.0.blocks.0.attention.self.dropout
+encoder.layers.0.blocks.0.attention.output
+encoder.layers.0.blocks.0.attention.output.dense
+encoder.layers.0.blocks.0.attention.output.dropout
+encoder.layers.0.blocks.0.layernorm_before
+encoder.layers.0.blocks.0.drop_path
+encoder.layers.0.blocks.0.intermediate
+encoder.layers.0.blocks.0.intermediate.dense
+encoder.layers.0.blocks.0.intermediate.intermediate_act_fn
+encoder.layers.0.blocks.0.output
+encoder.layers.0.blocks.0.output.dense
+encoder.layers.0.blocks.0.output.dropout
+encoder.layers.0.blocks.0.layernorm_after
+encoder.layers.0.blocks.1
+encoder.layers.0.blocks.1.attention
+encoder.layers.0.blocks.1.attention.self
+encoder.layers.0.blocks.1.attention.self.continuous_position_bias_mlp
+encoder.layers.0.blocks.1.attention.self.continuous_position_bias_mlp.0
+encoder.layers.0.blocks.1.attention.self.continuous_position_bias_mlp.1
+encoder.layers.0.blocks.1.attention.self.continuous_position_bias_mlp.2
+encoder.layers.0.blocks.1.attention.self.query
+encoder.layers.0.blocks.1.attention.self.key
+encoder.layers.0.blocks.1.attention.self.value
+encoder.layers.0.blocks.1.attention.self.dropout
+encoder.layers.0.blocks.1.attention.output
+encoder.layers.0.blocks.1.attention.output.dense
+encoder.layers.0.blocks.1.attention.output.dropout
+encoder.layers.0.blocks.1.layernorm_before
+encoder.layers.0.blocks.1.drop_path
+encoder.layers.0.blocks.1.intermediate
+encoder.layers.0.blocks.1.intermediate.dense
+encoder.layers.0.blocks.1.intermediate.intermediate_act_fn
+encoder.layers.0.blocks.1.output
+encoder.layers.0.blocks.1.output.dense
+encoder.layers.0.blocks.1.output.dropout
+encoder.layers.0.blocks.1.layernorm_after
+encoder.layers.0.downsample
+encoder.layers.0.downsample.reduction
+encoder.layers.0.downsample.norm
+encoder.layers.1
+encoder.layers.1.blocks
+encoder.layers.1.blocks.0
+encoder.layers.1.blocks.0.attention
+encoder.layers.1.blocks.0.attention.self
+encoder.layers.1.blocks.0.attention.self.continuous_position_bias_mlp
+encoder.layers.1.blocks.0.attention.self.continuous_position_bias_mlp.0
+encoder.layers.1.blocks.0.attention.self.continuous_position_bias_mlp.1
+encoder.layers.1.blocks.0.attention.self.continuous_position_bias_mlp.2
+encoder.layers.1.blocks.0.attention.self.query
+encoder.layers.1.blocks.0.attention.self.key
+encoder.layers.1.blocks.0.attention.self.value
+encoder.layers.1.blocks.0.attention.self.dropout
+encoder.layers.1.blocks.0.attention.output
+encoder.layers.1.blocks.0.attention.output.dense
+encoder.layers.1.blocks.0.attention.output.dropout
+encoder.layers.1.blocks.0.layernorm_before
+encoder.layers.1.blocks.0.drop_path
+encoder.layers.1.blocks.0.intermediate
+encoder.layers.1.blocks.0.intermediate.dense
+encoder.layers.1.blocks.0.intermediate.intermediate_act_fn
+encoder.layers.1.blocks.0.output
+encoder.layers.1.blocks.0.output.dense
+encoder.layers.1.blocks.0.output.dropout
+encoder.layers.1.blocks.0.layernorm_after
+encoder.layers.1.blocks.1
+encoder.layers.1.blocks.1.attention
+encoder.layers.1.blocks.1.attention.self
+encoder.layers.1.blocks.1.attention.self.continuous_position_bias_mlp
+encoder.layers.1.blocks.1.attention.self.continuous_position_bias_mlp.0
+encoder.layers.1.blocks.1.attention.self.continuous_position_bias_mlp.1
+encoder.layers.1.blocks.1.attention.self.continuous_position_bias_mlp.2
+encoder.layers.1.blocks.1.attention.self.query
+encoder.layers.1.blocks.1.attention.self.key
+encoder.layers.1.blocks.1.attention.self.value
+encoder.layers.1.blocks.1.attention.self.dropout
+encoder.layers.1.blocks.1.attention.output
+encoder.layers.1.blocks.1.attention.output.dense
+encoder.layers.1.blocks.1.attention.output.dropout
+encoder.layers.1.blocks.1.layernorm_before
+encoder.layers.1.blocks.1.drop_path
+encoder.layers.1.blocks.1.intermediate
+encoder.layers.1.blocks.1.intermediate.dense
+encoder.layers.1.blocks.1.intermediate.intermediate_act_fn
+encoder.layers.1.blocks.1.output
+encoder.layers.1.blocks.1.output.dense
+encoder.layers.1.blocks.1.output.dropout
+encoder.layers.1.blocks.1.layernorm_after
+encoder.layers.1.downsample
+encoder.layers.1.downsample.reduction
+encoder.layers.1.downsample.norm
+encoder.layers.2
+encoder.layers.2.blocks
+encoder.layers.2.blocks.0
+encoder.layers.2.blocks.0.attention
+encoder.layers.2.blocks.0.attention.self
+encoder.layers.2.blocks.0.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.0.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.0.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.0.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.0.attention.self.query
+encoder.layers.2.blocks.0.attention.self.key
+encoder.layers.2.blocks.0.attention.self.value
+encoder.layers.2.blocks.0.attention.self.dropout
+encoder.layers.2.blocks.0.attention.output
+encoder.layers.2.blocks.0.attention.output.dense
+encoder.layers.2.blocks.0.attention.output.dropout
+encoder.layers.2.blocks.0.layernorm_before
+encoder.layers.2.blocks.0.drop_path
+encoder.layers.2.blocks.0.intermediate
+encoder.layers.2.blocks.0.intermediate.dense
+encoder.layers.2.blocks.0.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.0.output
+encoder.layers.2.blocks.0.output.dense
+encoder.layers.2.blocks.0.output.dropout
+encoder.layers.2.blocks.0.layernorm_after
+encoder.layers.2.blocks.1
+encoder.layers.2.blocks.1.attention
+encoder.layers.2.blocks.1.attention.self
+encoder.layers.2.blocks.1.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.1.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.1.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.1.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.1.attention.self.query
+encoder.layers.2.blocks.1.attention.self.key
+encoder.layers.2.blocks.1.attention.self.value
+encoder.layers.2.blocks.1.attention.self.dropout
+encoder.layers.2.blocks.1.attention.output
+encoder.layers.2.blocks.1.attention.output.dense
+encoder.layers.2.blocks.1.attention.output.dropout
+encoder.layers.2.blocks.1.layernorm_before
+encoder.layers.2.blocks.1.drop_path
+encoder.layers.2.blocks.1.intermediate
+encoder.layers.2.blocks.1.intermediate.dense
+encoder.layers.2.blocks.1.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.1.output
+encoder.layers.2.blocks.1.output.dense
+encoder.layers.2.blocks.1.output.dropout
+encoder.layers.2.blocks.1.layernorm_after
+encoder.layers.2.blocks.2
+encoder.layers.2.blocks.2.attention
+encoder.layers.2.blocks.2.attention.self
+encoder.layers.2.blocks.2.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.2.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.2.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.2.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.2.attention.self.query
+encoder.layers.2.blocks.2.attention.self.key
+encoder.layers.2.blocks.2.attention.self.value
+encoder.layers.2.blocks.2.attention.self.dropout
+encoder.layers.2.blocks.2.attention.output
+encoder.layers.2.blocks.2.attention.output.dense
+encoder.layers.2.blocks.2.attention.output.dropout
+encoder.layers.2.blocks.2.layernorm_before
+encoder.layers.2.blocks.2.drop_path
+encoder.layers.2.blocks.2.intermediate
+encoder.layers.2.blocks.2.intermediate.dense
+encoder.layers.2.blocks.2.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.2.output
+encoder.layers.2.blocks.2.output.dense
+encoder.layers.2.blocks.2.output.dropout
+encoder.layers.2.blocks.2.layernorm_after
+encoder.layers.2.blocks.3
+encoder.layers.2.blocks.3.attention
+encoder.layers.2.blocks.3.attention.self
+encoder.layers.2.blocks.3.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.3.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.3.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.3.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.3.attention.self.query
+encoder.layers.2.blocks.3.attention.self.key
+encoder.layers.2.blocks.3.attention.self.value
+encoder.layers.2.blocks.3.attention.self.dropout
+encoder.layers.2.blocks.3.attention.output
+encoder.layers.2.blocks.3.attention.output.dense
+encoder.layers.2.blocks.3.attention.output.dropout
+encoder.layers.2.blocks.3.layernorm_before
+encoder.layers.2.blocks.3.drop_path
+encoder.layers.2.blocks.3.intermediate
+encoder.layers.2.blocks.3.intermediate.dense
+encoder.layers.2.blocks.3.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.3.output
+encoder.layers.2.blocks.3.output.dense
+encoder.layers.2.blocks.3.output.dropout
+encoder.layers.2.blocks.3.layernorm_after
+encoder.layers.2.blocks.4
+encoder.layers.2.blocks.4.attention
+encoder.layers.2.blocks.4.attention.self
+encoder.layers.2.blocks.4.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.4.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.4.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.4.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.4.attention.self.query
+encoder.layers.2.blocks.4.attention.self.key
+encoder.layers.2.blocks.4.attention.self.value
+encoder.layers.2.blocks.4.attention.self.dropout
+encoder.layers.2.blocks.4.attention.output
+encoder.layers.2.blocks.4.attention.output.dense
+encoder.layers.2.blocks.4.attention.output.dropout
+encoder.layers.2.blocks.4.layernorm_before
+encoder.layers.2.blocks.4.drop_path
+encoder.layers.2.blocks.4.intermediate
+encoder.layers.2.blocks.4.intermediate.dense
+encoder.layers.2.blocks.4.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.4.output
+encoder.layers.2.blocks.4.output.dense
+encoder.layers.2.blocks.4.output.dropout
+encoder.layers.2.blocks.4.layernorm_after
+encoder.layers.2.blocks.5
+encoder.layers.2.blocks.5.attention
+encoder.layers.2.blocks.5.attention.self
+encoder.layers.2.blocks.5.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.5.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.5.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.5.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.5.attention.self.query
+encoder.layers.2.blocks.5.attention.self.key
+encoder.layers.2.blocks.5.attention.self.value
+encoder.layers.2.blocks.5.attention.self.dropout
+encoder.layers.2.blocks.5.attention.output
+encoder.layers.2.blocks.5.attention.output.dense
+encoder.layers.2.blocks.5.attention.output.dropout
+encoder.layers.2.blocks.5.layernorm_before
+encoder.layers.2.blocks.5.drop_path
+encoder.layers.2.blocks.5.intermediate
+encoder.layers.2.blocks.5.intermediate.dense
+encoder.layers.2.blocks.5.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.5.output
+encoder.layers.2.blocks.5.output.dense
+encoder.layers.2.blocks.5.output.dropout
+encoder.layers.2.blocks.5.layernorm_after
+encoder.layers.2.blocks.6
+encoder.layers.2.blocks.6.attention
+encoder.layers.2.blocks.6.attention.self
+encoder.layers.2.blocks.6.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.6.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.6.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.6.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.6.attention.self.query
+encoder.layers.2.blocks.6.attention.self.key
+encoder.layers.2.blocks.6.attention.self.value
+encoder.layers.2.blocks.6.attention.self.dropout
+encoder.layers.2.blocks.6.attention.output
+encoder.layers.2.blocks.6.attention.output.dense
+encoder.layers.2.blocks.6.attention.output.dropout
+encoder.layers.2.blocks.6.layernorm_before
+encoder.layers.2.blocks.6.drop_path
+encoder.layers.2.blocks.6.intermediate
+encoder.layers.2.blocks.6.intermediate.dense
+encoder.layers.2.blocks.6.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.6.output
+encoder.layers.2.blocks.6.output.dense
+encoder.layers.2.blocks.6.output.dropout
+encoder.layers.2.blocks.6.layernorm_after
+encoder.layers.2.blocks.7
+encoder.layers.2.blocks.7.attention
+encoder.layers.2.blocks.7.attention.self
+encoder.layers.2.blocks.7.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.7.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.7.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.7.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.7.attention.self.query
+encoder.layers.2.blocks.7.attention.self.key
+encoder.layers.2.blocks.7.attention.self.value
+encoder.layers.2.blocks.7.attention.self.dropout
+encoder.layers.2.blocks.7.attention.output
+encoder.layers.2.blocks.7.attention.output.dense
+encoder.layers.2.blocks.7.attention.output.dropout
+encoder.layers.2.blocks.7.layernorm_before
+encoder.layers.2.blocks.7.drop_path
+encoder.layers.2.blocks.7.intermediate
+encoder.layers.2.blocks.7.intermediate.dense
+encoder.layers.2.blocks.7.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.7.output
+encoder.layers.2.blocks.7.output.dense
+encoder.layers.2.blocks.7.output.dropout
+encoder.layers.2.blocks.7.layernorm_after
+encoder.layers.2.blocks.8
+encoder.layers.2.blocks.8.attention
+encoder.layers.2.blocks.8.attention.self
+encoder.layers.2.blocks.8.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.8.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.8.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.8.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.8.attention.self.query
+encoder.layers.2.blocks.8.attention.self.key
+encoder.layers.2.blocks.8.attention.self.value
+encoder.layers.2.blocks.8.attention.self.dropout
+encoder.layers.2.blocks.8.attention.output
+encoder.layers.2.blocks.8.attention.output.dense
+encoder.layers.2.blocks.8.attention.output.dropout
+encoder.layers.2.blocks.8.layernorm_before
+encoder.layers.2.blocks.8.drop_path
+encoder.layers.2.blocks.8.intermediate
+encoder.layers.2.blocks.8.intermediate.dense
+encoder.layers.2.blocks.8.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.8.output
+encoder.layers.2.blocks.8.output.dense
+encoder.layers.2.blocks.8.output.dropout
+encoder.layers.2.blocks.8.layernorm_after
+encoder.layers.2.blocks.9
+encoder.layers.2.blocks.9.attention
+encoder.layers.2.blocks.9.attention.self
+encoder.layers.2.blocks.9.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.9.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.9.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.9.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.9.attention.self.query
+encoder.layers.2.blocks.9.attention.self.key
+encoder.layers.2.blocks.9.attention.self.value
+encoder.layers.2.blocks.9.attention.self.dropout
+encoder.layers.2.blocks.9.attention.output
+encoder.layers.2.blocks.9.attention.output.dense
+encoder.layers.2.blocks.9.attention.output.dropout
+encoder.layers.2.blocks.9.layernorm_before
+encoder.layers.2.blocks.9.drop_path
+encoder.layers.2.blocks.9.intermediate
+encoder.layers.2.blocks.9.intermediate.dense
+encoder.layers.2.blocks.9.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.9.output
+encoder.layers.2.blocks.9.output.dense
+encoder.layers.2.blocks.9.output.dropout
+encoder.layers.2.blocks.9.layernorm_after
+encoder.layers.2.blocks.10
+encoder.layers.2.blocks.10.attention
+encoder.layers.2.blocks.10.attention.self
+encoder.layers.2.blocks.10.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.10.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.10.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.10.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.10.attention.self.query
+encoder.layers.2.blocks.10.attention.self.key
+encoder.layers.2.blocks.10.attention.self.value
+encoder.layers.2.blocks.10.attention.self.dropout
+encoder.layers.2.blocks.10.attention.output
+encoder.layers.2.blocks.10.attention.output.dense
+encoder.layers.2.blocks.10.attention.output.dropout
+encoder.layers.2.blocks.10.layernorm_before
+encoder.layers.2.blocks.10.drop_path
+encoder.layers.2.blocks.10.intermediate
+encoder.layers.2.blocks.10.intermediate.dense
+encoder.layers.2.blocks.10.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.10.output
+encoder.layers.2.blocks.10.output.dense
+encoder.layers.2.blocks.10.output.dropout
+encoder.layers.2.blocks.10.layernorm_after
+encoder.layers.2.blocks.11
+encoder.layers.2.blocks.11.attention
+encoder.layers.2.blocks.11.attention.self
+encoder.layers.2.blocks.11.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.11.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.11.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.11.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.11.attention.self.query
+encoder.layers.2.blocks.11.attention.self.key
+encoder.layers.2.blocks.11.attention.self.value
+encoder.layers.2.blocks.11.attention.self.dropout
+encoder.layers.2.blocks.11.attention.output
+encoder.layers.2.blocks.11.attention.output.dense
+encoder.layers.2.blocks.11.attention.output.dropout
+encoder.layers.2.blocks.11.layernorm_before
+encoder.layers.2.blocks.11.drop_path
+encoder.layers.2.blocks.11.intermediate
+encoder.layers.2.blocks.11.intermediate.dense
+encoder.layers.2.blocks.11.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.11.output
+encoder.layers.2.blocks.11.output.dense
+encoder.layers.2.blocks.11.output.dropout
+encoder.layers.2.blocks.11.layernorm_after
+encoder.layers.2.blocks.12
+encoder.layers.2.blocks.12.attention
+encoder.layers.2.blocks.12.attention.self
+encoder.layers.2.blocks.12.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.12.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.12.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.12.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.12.attention.self.query
+encoder.layers.2.blocks.12.attention.self.key
+encoder.layers.2.blocks.12.attention.self.value
+encoder.layers.2.blocks.12.attention.self.dropout
+encoder.layers.2.blocks.12.attention.output
+encoder.layers.2.blocks.12.attention.output.dense
+encoder.layers.2.blocks.12.attention.output.dropout
+encoder.layers.2.blocks.12.layernorm_before
+encoder.layers.2.blocks.12.drop_path
+encoder.layers.2.blocks.12.intermediate
+encoder.layers.2.blocks.12.intermediate.dense
+encoder.layers.2.blocks.12.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.12.output
+encoder.layers.2.blocks.12.output.dense
+encoder.layers.2.blocks.12.output.dropout
+encoder.layers.2.blocks.12.layernorm_after
+encoder.layers.2.blocks.13
+encoder.layers.2.blocks.13.attention
+encoder.layers.2.blocks.13.attention.self
+encoder.layers.2.blocks.13.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.13.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.13.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.13.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.13.attention.self.query
+encoder.layers.2.blocks.13.attention.self.key
+encoder.layers.2.blocks.13.attention.self.value
+encoder.layers.2.blocks.13.attention.self.dropout
+encoder.layers.2.blocks.13.attention.output
+encoder.layers.2.blocks.13.attention.output.dense
+encoder.layers.2.blocks.13.attention.output.dropout
+encoder.layers.2.blocks.13.layernorm_before
+encoder.layers.2.blocks.13.drop_path
+encoder.layers.2.blocks.13.intermediate
+encoder.layers.2.blocks.13.intermediate.dense
+encoder.layers.2.blocks.13.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.13.output
+encoder.layers.2.blocks.13.output.dense
+encoder.layers.2.blocks.13.output.dropout
+encoder.layers.2.blocks.13.layernorm_after
+encoder.layers.2.blocks.14
+encoder.layers.2.blocks.14.attention
+encoder.layers.2.blocks.14.attention.self
+encoder.layers.2.blocks.14.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.14.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.14.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.14.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.14.attention.self.query
+encoder.layers.2.blocks.14.attention.self.key
+encoder.layers.2.blocks.14.attention.self.value
+encoder.layers.2.blocks.14.attention.self.dropout
+encoder.layers.2.blocks.14.attention.output
+encoder.layers.2.blocks.14.attention.output.dense
+encoder.layers.2.blocks.14.attention.output.dropout
+encoder.layers.2.blocks.14.layernorm_before
+encoder.layers.2.blocks.14.drop_path
+encoder.layers.2.blocks.14.intermediate
+encoder.layers.2.blocks.14.intermediate.dense
+encoder.layers.2.blocks.14.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.14.output
+encoder.layers.2.blocks.14.output.dense
+encoder.layers.2.blocks.14.output.dropout
+encoder.layers.2.blocks.14.layernorm_after
+encoder.layers.2.blocks.15
+encoder.layers.2.blocks.15.attention
+encoder.layers.2.blocks.15.attention.self
+encoder.layers.2.blocks.15.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.15.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.15.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.15.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.15.attention.self.query
+encoder.layers.2.blocks.15.attention.self.key
+encoder.layers.2.blocks.15.attention.self.value
+encoder.layers.2.blocks.15.attention.self.dropout
+encoder.layers.2.blocks.15.attention.output
+encoder.layers.2.blocks.15.attention.output.dense
+encoder.layers.2.blocks.15.attention.output.dropout
+encoder.layers.2.blocks.15.layernorm_before
+encoder.layers.2.blocks.15.drop_path
+encoder.layers.2.blocks.15.intermediate
+encoder.layers.2.blocks.15.intermediate.dense
+encoder.layers.2.blocks.15.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.15.output
+encoder.layers.2.blocks.15.output.dense
+encoder.layers.2.blocks.15.output.dropout
+encoder.layers.2.blocks.15.layernorm_after
+encoder.layers.2.blocks.16
+encoder.layers.2.blocks.16.attention
+encoder.layers.2.blocks.16.attention.self
+encoder.layers.2.blocks.16.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.16.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.16.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.16.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.16.attention.self.query
+encoder.layers.2.blocks.16.attention.self.key
+encoder.layers.2.blocks.16.attention.self.value
+encoder.layers.2.blocks.16.attention.self.dropout
+encoder.layers.2.blocks.16.attention.output
+encoder.layers.2.blocks.16.attention.output.dense
+encoder.layers.2.blocks.16.attention.output.dropout
+encoder.layers.2.blocks.16.layernorm_before
+encoder.layers.2.blocks.16.drop_path
+encoder.layers.2.blocks.16.intermediate
+encoder.layers.2.blocks.16.intermediate.dense
+encoder.layers.2.blocks.16.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.16.output
+encoder.layers.2.blocks.16.output.dense
+encoder.layers.2.blocks.16.output.dropout
+encoder.layers.2.blocks.16.layernorm_after
+encoder.layers.2.blocks.17
+encoder.layers.2.blocks.17.attention
+encoder.layers.2.blocks.17.attention.self
+encoder.layers.2.blocks.17.attention.self.continuous_position_bias_mlp
+encoder.layers.2.blocks.17.attention.self.continuous_position_bias_mlp.0
+encoder.layers.2.blocks.17.attention.self.continuous_position_bias_mlp.1
+encoder.layers.2.blocks.17.attention.self.continuous_position_bias_mlp.2
+encoder.layers.2.blocks.17.attention.self.query
+encoder.layers.2.blocks.17.attention.self.key
+encoder.layers.2.blocks.17.attention.self.value
+encoder.layers.2.blocks.17.attention.self.dropout
+encoder.layers.2.blocks.17.attention.output
+encoder.layers.2.blocks.17.attention.output.dense
+encoder.layers.2.blocks.17.attention.output.dropout
+encoder.layers.2.blocks.17.layernorm_before
+encoder.layers.2.blocks.17.drop_path
+encoder.layers.2.blocks.17.intermediate
+encoder.layers.2.blocks.17.intermediate.dense
+encoder.layers.2.blocks.17.intermediate.intermediate_act_fn
+encoder.layers.2.blocks.17.output
+encoder.layers.2.blocks.17.output.dense
+encoder.layers.2.blocks.17.output.dropout
+encoder.layers.2.blocks.17.layernorm_after
+encoder.layers.2.downsample
+encoder.layers.2.downsample.reduction
+encoder.layers.2.downsample.norm
+encoder.layers.3
+encoder.layers.3.blocks
+encoder.layers.3.blocks.0
+encoder.layers.3.blocks.0.attention
+encoder.layers.3.blocks.0.attention.self
+encoder.layers.3.blocks.0.attention.self.continuous_position_bias_mlp
+encoder.layers.3.blocks.0.attention.self.continuous_position_bias_mlp.0
+encoder.layers.3.blocks.0.attention.self.continuous_position_bias_mlp.1
+encoder.layers.3.blocks.0.attention.self.continuous_position_bias_mlp.2
+encoder.layers.3.blocks.0.attention.self.query
+encoder.layers.3.blocks.0.attention.self.key
+encoder.layers.3.blocks.0.attention.self.value
+encoder.layers.3.blocks.0.attention.self.dropout
+encoder.layers.3.blocks.0.attention.output
+encoder.layers.3.blocks.0.attention.output.dense
+encoder.layers.3.blocks.0.attention.output.dropout
+encoder.layers.3.blocks.0.layernorm_before
+encoder.layers.3.blocks.0.drop_path
+encoder.layers.3.blocks.0.intermediate
+encoder.layers.3.blocks.0.intermediate.dense
+encoder.layers.3.blocks.0.intermediate.intermediate_act_fn
+encoder.layers.3.blocks.0.output
+encoder.layers.3.blocks.0.output.dense
+encoder.layers.3.blocks.0.output.dropout
+encoder.layers.3.blocks.0.layernorm_after
+encoder.layers.3.blocks.1
+encoder.layers.3.blocks.1.attention
+encoder.layers.3.blocks.1.attention.self
+encoder.layers.3.blocks.1.attention.self.continuous_position_bias_mlp
+encoder.layers.3.blocks.1.attention.self.continuous_position_bias_mlp.0
+encoder.layers.3.blocks.1.attention.self.continuous_position_bias_mlp.1
+encoder.layers.3.blocks.1.attention.self.continuous_position_bias_mlp.2
+encoder.layers.3.blocks.1.attention.self.query
+encoder.layers.3.blocks.1.attention.self.key
+encoder.layers.3.blocks.1.attention.self.value
+encoder.layers.3.blocks.1.attention.self.dropout
+encoder.layers.3.blocks.1.attention.output
+encoder.layers.3.blocks.1.attention.output.dense
+encoder.layers.3.blocks.1.attention.output.dropout
+encoder.layers.3.blocks.1.layernorm_before
+encoder.layers.3.blocks.1.drop_path
+encoder.layers.3.blocks.1.intermediate
+encoder.layers.3.blocks.1.intermediate.dense
+encoder.layers.3.blocks.1.intermediate.intermediate_act_fn
+encoder.layers.3.blocks.1.output
+encoder.layers.3.blocks.1.output.dense
+encoder.layers.3.blocks.1.output.dropout
+encoder.layers.3.blocks.1.layernorm_after
+layernorm
+pooler
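One caveat on this dump, which the CAM_LAYER_BIN value inherits: list_modules.py generated it with AutoModel (the bare Swin-V2 backbone), while app.py loads AutoModelForImageClassification, which by Hugging Face convention nests that backbone under a `swinv2.` attribute. The dumped name may therefore need the prefix when resolved against the classification model; this is an assumption about the HF module layout worth checking directly. A minimal sketch:

    # Check which spelling of the CAM target resolves on the classification model.
    from transformers import AutoModelForImageClassification

    m = AutoModelForImageClassification.from_pretrained("haywoodsloan/ai-image-detector-deploy")
    mods = dict(m.named_modules())
    for name in ("encoder.layers.3.blocks.1.layernorm_after",
                 "swinv2.encoder.layers.3.blocks.1.layernorm_after"):
        print(name, "->", name in mods)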