prasadnu committed on
Commit f66d8c3 · 1 parent: 9663edc

multilingual

Files changed (1):
semantic_search/all_search_execute.py  +36 −40
semantic_search/all_search_execute.py CHANGED
@@ -160,21 +160,21 @@ def handler(input_,session_id):
     print("Hybrid Rerank Search Pipeline updated: "+str(r.status_code))
 
     ######## Updating opensearch_translation_pipeline Search pipeline #######
-    opensearch_translation_pipeline = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation', auth=awsauth,headers=headers)).text)
-    path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation"
-    url = host + path
-    opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"]["phase_results_processors"] = hybrid_search_processor
-    print(opensearch_translation_pipeline)
-    r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"], headers=headers)
-    print("translation hybrid Search Pipeline updated: "+str(r.status_code))
+    # opensearch_translation_pipeline = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation', auth=awsauth,headers=headers)).text)
+    # path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation"
+    # url = host + path
+    # opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"]["phase_results_processors"] = hybrid_search_processor
+    # print(opensearch_translation_pipeline)
+    # r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline["ml_inference_for_vector_search_and_language_translation"], headers=headers)
+    # print("translation hybrid Search Pipeline updated: "+str(r.status_code))
 
     ######## Updating opensearch_translation_pipeline_with_rerank Search pipeline #######
-    opensearch_translation_pipeline_with_rerank = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank', auth=awsauth,headers=headers)).text)
-    path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank"
-    url = host + path
-    opensearch_translation_pipeline_with_rerank["ml_inference_for_vector_search_and_language_translation_with_rerank"]["phase_results_processors"] = hybrid_search_processor
-    r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline_with_rerank["ml_inference_for_vector_search_and_language_translation_with_rerank"], headers=headers)
-    print("translation hybrid rerank Search Pipeline updated: "+str(r.status_code))
+    # opensearch_translation_pipeline_with_rerank = json.loads((requests.get(host+'_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank', auth=awsauth,headers=headers)).text)
+    # path = "_search/pipeline/ml_inference_for_vector_search_and_language_translation_with_rerank"
+    # url = host + path
+    # opensearch_translation_pipeline_with_rerank["ml_inference_for_vector_search_and_language_translation_with_rerank"]["phase_results_processors"] = hybrid_search_processor
+    # r = requests.put(url, auth=awsauth, json=opensearch_translation_pipeline_with_rerank["ml_inference_for_vector_search_and_language_translation_with_rerank"], headers=headers)
+    # print("translation hybrid rerank Search Pipeline updated: "+str(r.status_code))
     ######## start of Applying LLM filters #######
     if(st.session_state.input_rewritten_query!=""):
         filter_ = {"filter": {
@@ -482,41 +482,37 @@ def handler(input_,session_id):
         }}
 
         r = requests.get(url, auth=awsauth, json=hybrid_payload, headers=headers)
-        print("url: "+url)
-        print("payload: "+json.dumps(hybrid_payload))
         response_ = json.loads(r.text)
         docs = response_['hits']['hits']
 
 
     else:
         if( st.session_state.input_hybridType == "OpenSearch Hybrid Query"):
-            if(st.session_state.input_multilingual):
-                if(st.session_state.re_ranker == 'true' and st.session_state.input_reranker == 'Cohere Rerank'):
-                    path = "demostore-search-index-reindex-new/_search?search_pipeline=ml_inference_for_vector_search_and_language_translation_with_rerank"
-                    url = host + path
-                    hybrid_payload["ext"] = {"rerank": {
-                        "query_context": {
-                            "query_text": query
-                        }
-                    }}
-                else:
-                    path = "demostore-search-index-reindex-new/_search?search_pipeline=ml_inference_for_vector_search_and_language_translation"
-                    url = host + path
-            else:
-                path = "demostore-search-index-reindex-new/_search?search_pipeline=hybrid_search_pipeline"
+            # if(st.session_state.input_multilingual):
+            #     if(st.session_state.re_ranker == 'true' and st.session_state.input_reranker == 'Cohere Rerank'):
+            #         path = "demostore-search-index-reindex-new/_search?search_pipeline=ml_inference_for_vector_search_and_language_translation_with_rerank"
+            #         url = host + path
+            #         hybrid_payload["ext"] = {"rerank": {
+            #             "query_context": {
+            #                 "query_text": query
+            #             }
+            #         }}
+            #     else:
+            #         path = "demostore-search-index-reindex-new/_search?search_pipeline=ml_inference_for_vector_search_and_language_translation"
+            #         url = host + path
+            #else:
+            path = "demostore-search-index-reindex-new/_search?search_pipeline=hybrid_search_pipeline"
+            url = host + path
+            if(st.session_state.re_ranker == 'true' and st.session_state.input_reranker == 'Cohere Rerank'):
+
+                path = "demostore-search-index-reindex-new/_search?search_pipeline=hybrid_rerank_pipeline"
                 url = host + path
-            if(st.session_state.re_ranker == 'true' and st.session_state.input_reranker == 'Cohere Rerank'):
-
-                path = "demostore-search-index-reindex-new/_search?search_pipeline=hybrid_rerank_pipeline"
-                url = host + path
-                hybrid_payload["ext"] = {"rerank": {
-                    "query_context": {
-                        "query_text": query
-                    }
-                }}
+                hybrid_payload["ext"] = {"rerank": {
+                    "query_context": {
+                        "query_text": query
+                    }
+                }}
             r = requests.get(url, auth=awsauth, json=hybrid_payload, headers=headers)
-            print(hybrid_payload)
-            print(r.text)
             response_ = json.loads(r.text)
             docs = response_['hits']['hits']
 
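For context, the pattern this commit disables for the two translation pipelines (and that the file still uses for the plain hybrid pipelines, per the "Hybrid Rerank Search Pipeline updated" context line) is: GET an existing search pipeline, overwrite its phase_results_processors, and PUT it back. The sketch below is illustrative only; host, awsauth, headers, and the hybrid_search_processor body are stand-ins for values the real app builds elsewhere, not the file's actual definitions.

import json

import requests
from requests.auth import HTTPBasicAuth

# Stand-ins (assumptions): the real app supplies the OpenSearch endpoint, AWS auth,
# and a hybrid_search_processor built from the UI's normalization settings.
host = "https://my-opensearch-domain.example.com/"
awsauth = HTTPBasicAuth("user", "pass")
headers = {"Content-Type": "application/json"}
hybrid_search_processor = [{
    "normalization-processor": {
        "normalization": {"technique": "min_max"},
        "combination": {"technique": "arithmetic_mean"}
    }
}]

def update_search_pipeline(pipeline_id):
    # Fetch the current pipeline definition, swap in the new phase_results_processors,
    # and write it back with a PUT to the same endpoint.
    url = host + "_search/pipeline/" + pipeline_id
    current = json.loads(requests.get(url, auth=awsauth, headers=headers).text)
    body = current[pipeline_id]
    body["phase_results_processors"] = hybrid_search_processor
    r = requests.put(url, auth=awsauth, json=body, headers=headers)
    print(pipeline_id + " updated: " + str(r.status_code))

update_search_pipeline("hybrid_search_pipeline")
update_search_pipeline("hybrid_rerank_pipeline")

After this commit, only the non-translation pipelines are refreshed this way; the two ml_inference_for_vector_search_and_language_translation pipelines are left untouched by handler().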
 
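The second hunk comments out the multilingual branch, so the OpenSearch Hybrid Query path now always targets hybrid_search_pipeline and switches to hybrid_rerank_pipeline (plus the ext.rerank.query_context block) only when Cohere Rerank is enabled. A minimal sketch of that resulting flow, with hypothetical host/auth values and a placeholder query body standing in for the hybrid_payload the file builds earlier:

import json

import requests
from requests.auth import HTTPBasicAuth

# Assumptions: endpoint, auth, and the hybrid query body are placeholders;
# the index and search pipeline names come from the diff above.
host = "https://my-opensearch-domain.example.com/"
awsauth = HTTPBasicAuth("user", "pass")
headers = {"Content-Type": "application/json"}
index = "demostore-search-index-reindex-new"

def run_hybrid_search(hybrid_payload, query, use_cohere_rerank):
    # Pick the search pipeline the same way the updated code does.
    pipeline = "hybrid_rerank_pipeline" if use_cohere_rerank else "hybrid_search_pipeline"
    url = host + index + "/_search?search_pipeline=" + pipeline
    if use_cohere_rerank:
        # The rerank response processor reads the original query text from ext.rerank.query_context.
        hybrid_payload["ext"] = {"rerank": {"query_context": {"query_text": query}}}
    r = requests.get(url, auth=awsauth, json=hybrid_payload, headers=headers)
    return json.loads(r.text)["hits"]["hits"]

# Hypothetical usage with a minimal hybrid query body.
payload = {"query": {"hybrid": {"queries": [{"match": {"product_description": "running shoes"}}]}}}
docs = run_hybrid_search(payload, "running shoes", use_cohere_rerank=True)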