# NOTE: removed web-viewer extraction artifacts (page chrome, file size,
# commit hash, line-number gutter) that were not part of this config file.
# Character dictionary: lowercase English letters plus digits, loaded from
# the shared dicts directory.  Padding and unknown symbols are appended
# (with_padding / with_unknown) for CTC-style decoding.
dictionary = {
    'type': 'Dictionary',
    'dict_file': '{{ fileDirname }}/../../../dicts/lower_english_digits.txt',
    'with_padding': True,
    'with_unknown': True,
}
# SVTR text recognizer: an STN rectification preprocessor, the SVTR
# transformer encoder, and a CTC decoder sharing the module-level
# `dictionary` defined above.
model = {
    'type': 'SVTR',
    'preprocessor': {
        # Spatial transformer (STN) with 20 control points; resizes to
        # 32x64 for localization and emits rectified 32x100 crops.
        'type': 'STN',
        'in_channels': 3,
        'resized_image_size': (32, 64),
        'output_image_size': (32, 100),
        'num_control_points': 20,
        'margins': [0.05, 0.05],
    },
    'encoder': {
        'type': 'SVTREncoder',
        'img_size': [32, 100],
        'in_channels': 3,
        'out_channels': 192,
        'embed_dims': [64, 128, 256],
        'depth': [3, 6, 3],
        'num_heads': [2, 4, 8],
        # 12 mixing blocks total: the first six 'Local', the last six 'Global'.
        'mixer_types': ['Local'] * 6 + ['Global'] * 6,
        'window_size': [[7, 11], [7, 11], [7, 11]],
        'merging_types': 'Conv',
        'prenorm': False,
        'max_seq_len': 25,
    },
    'decoder': {
        'type': 'SVTRDecoder',
        'in_channels': 192,
        'module_loss': {
            'type': 'CTCModuleLoss',
            'letter_case': 'lower',
            'zero_infinity': True,
        },
        'postprocessor': {'type': 'CTCPostProcessor'},
        'dictionary': dictionary,
    },
    # mean/std of 127.5 presumably normalize uint8 pixels to roughly
    # [-1, 1] — confirm against TextRecogDataPreprocessor.
    'data_preprocessor': {
        'type': 'TextRecogDataPreprocessor',
        'mean': [127.5],
        'std': [127.5],
    },
}
# Training pipeline: load image + text annotation, run a battery of
# stochastic augmentations (each applied independently with prob 0.4),
# then a fixed resize and packing into model inputs.
train_pipeline = [
    {'type': 'LoadImageFromFile', 'ignore_empty': True, 'min_size': 5},
    {'type': 'LoadOCRAnnotations', 'with_text': True},
    # General text-recognition distortions.
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{'type': 'TextRecogGeneralAug'}],
    },
    # Random height cropping.
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{'type': 'CropHeight'}],
    },
    # Gaussian blur, but only when the image's smaller side exceeds 10 px
    # (guards the 5x5 kernel against tiny crops).
    {
        'type': 'ConditionApply',
        'condition': 'min(results["img_shape"])>10',
        'true_transforms': {
            'type': 'RandomApply',
            'prob': 0.4,
            'transforms': [{
                'type': 'TorchVisionWrapper',
                'op': 'GaussianBlur',
                'kernel_size': 5,
                'sigma': 1,
            }],
        },
    },
    # Photometric jitter via torchvision ColorJitter.
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{
            'type': 'TorchVisionWrapper',
            'op': 'ColorJitter',
            'brightness': 0.5,
            'saturation': 0.5,
            'contrast': 0.5,
            'hue': 0.1,
        }],
    },
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{'type': 'ImageContentJitter'}],
    },
    # Additive Gaussian noise through the imgaug wrapper.
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{
            'type': 'ImgAugWrapper',
            'args': [{'cls': 'AdditiveGaussianNoise', 'scale': 0.1**0.5}],
        }],
    },
    # presumably pixel-value inversion — confirm against ReversePixels.
    {
        'type': 'RandomApply',
        'prob': 0.4,
        'transforms': [{'type': 'ReversePixels'}],
    },
    {'type': 'Resize', 'scale': (256, 64)},
    {
        'type': 'PackTextRecogInputs',
        'meta_keys': ('img_path', 'ori_shape', 'img_shape', 'valid_ratio'),
    },
]
# Evaluation pipeline: deterministic — same resize and packing as training,
# but no augmentation.
test_pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'Resize', 'scale': (256, 64)},
    {'type': 'LoadOCRAnnotations', 'with_text': True},
    {
        'type': 'PackTextRecogInputs',
        'meta_keys': ('img_path', 'ori_shape', 'img_shape', 'valid_ratio'),
    },
]
# Test-time augmentation: when the image is taller than it is wide
# (width < height per the condition string), try three Rot90 variants
# (k quarter-turns: 0, 1, 3 — presumably 0/90/270 degrees, confirm with
# imgaug Rot90); every variant is then resized, annotated and packed.
tta_pipeline = [
    {'type': 'LoadImageFromFile'},
    {
        'type': 'TestTimeAug',
        'transforms': [
            [
                # One ConditionApply per rotation count; all share the
                # same vertical-image condition.
                {
                    'type': 'ConditionApply',
                    'true_transforms': [{
                        'type': 'ImgAugWrapper',
                        'args': [{'cls': 'Rot90', 'k': k,
                                  'keep_size': False}],
                    }],
                    'condition':
                    "results['img_shape'][1]<results['img_shape'][0]",
                }
                for k in (0, 1, 3)
            ],
            [{'type': 'Resize', 'scale': (256, 64)}],
            [{'type': 'LoadOCRAnnotations', 'with_text': True}],
            [{
                'type': 'PackTextRecogInputs',
                'meta_keys': ('img_path', 'ori_shape', 'img_shape',
                              'valid_ratio'),
            }],
        ],
    },
]
# (end of file — stray extraction artifact removed)