AlekseyCalvin committed on
Commit
4a174f8
·
verified ·
1 Parent(s): a60d56c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -54,15 +54,15 @@ torch.cuda.empty_cache()
54
 
55
  device = "cuda" if torch.cuda.is_available() else "cpu"
56
 
57
- model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
58
- config = CLIPConfig.from_pretrained(model_id)
59
- config.text_config.max_position_embeddings = 77
60
- clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
61
- clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=77)
62
- pipe.tokenizer = clip_processor.tokenizer
63
- pipe.text_encoder = clip_model.text_model
64
- pipe.tokenizer_max_length = 77
65
- pipe.text_encoder.dtype = torch.bfloat16
66
 
67
  #clipmodel = 'norm'
68
  #if clipmodel == "long":
 
54
 
55
  device = "cuda" if torch.cuda.is_available() else "cpu"
56
 
57
+ #model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
58
+ #config = CLIPConfig.from_pretrained(model_id)
59
+ #config.text_config.max_position_embeddings = 77
60
+ #clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
61
+ #clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=77)
62
+ #pipe.tokenizer = clip_processor.tokenizer
63
+ #pipe.text_encoder = clip_model.text_model
64
+ #pipe.tokenizer_max_length = 77
65
+ #pipe.text_encoder.dtype = torch.bfloat16
66
 
67
  #clipmodel = 'norm'
68
  #if clipmodel == "long":