# mean and std in RGB order; this should remain unchanged as long as your dataset is similar to COCO.
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]

# these are the COCO anchors; change them if necessary
#anchors_scales: '[2 ** 0, 2 ** 1/3, 2 ** 2/3]'
#anchors_ratios: '[(0.7, 1.4), (1.0, 1.0), (1.4, 0.7)]'

#anchors_scales: '[5.125, 0.625, 1.625]'
#anchors_ratios: '[(1, 0.7317073170731707), (1, 0.85), (1, 0.7884615384615384)]'

anchors_scales: '[2**0, 2**0.70, 2**1.32]'
anchors_ratios: '[(0.62, 1.58), (1.0, 1.0), (1.58, 0.62)]'

# must match your dataset's category_id.
# category_id is one-indexed while obj_list is zero-indexed;
# for example, the index of 'car' here is 2, while the category_id of 'car' is 3
#obj_list: ['pedestrian',
#           'rider',
#           'car',
#           'truck',
#           'bus',
#           'train',
#           'motorcycle',
#           'bicycle',
#           'traffic light',
#           'traffic sign']
obj_list: ['car']

seg_list: ['road', 'lane']

dataset:
  color_rgb: false
  dataroot: ./datasets/bdd100k
  labelroot: ./datasets/data2/zwt/bdd/bdd100k/labels/100k
  laneroot: ./datasets/bdd_lane_gt
  maskroot: ./datasets/bdd_seg_gt
  data_format: jpg
  flip: true
  hsv_h: 0.015
  hsv_s: 0.7
  hsv_v: 0.4
  org_img_size:
  - 720
  - 1280
  rot_factor: 10
  scale_factor: 0.25
  shear: 0.0
  test_set: val
  train_set: train
  translate: 0.1

model:
  image_size:
  - 640
  - 384
  need_autoanchor: false
  pin_memory: false
  num_seg_class: 2
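
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original config; kept as comments so the file
# stays valid YAML). anchors_scales and anchors_ratios are stored as strings,
# so the training code is expected to convert them back into Python objects,
# typically with eval(). The file path below is a hypothetical example.
#
#   import yaml
#
#   with open('projects/bdd100k.yml') as f:            # hypothetical path to this file
#       params = yaml.safe_load(f)
#
#   scales = eval(params['anchors_scales'])            # [1.0, ~1.62, ~2.50]
#   ratios = eval(params['anchors_ratios'])            # [(0.62, 1.58), (1.0, 1.0), (1.58, 0.62)]
#   num_det_classes = len(params['obj_list'])          # 1 ('car')
#   num_seg_classes = params['model']['num_seg_class'] # 2 ('road', 'lane')
#
# Because eval() executes arbitrary expressions, load this config only from
# trusted sources.
# ---------------------------------------------------------------------------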