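"""Talent Track By AI: a Streamlit application that analyzes an uploaded
resume with a local Llama 2 model (summary, strengths, weaknesses, and
suggested job titles), scrapes matching LinkedIn job listings with Selenium,
and includes a career-advice chatbot."""
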
import time
import numpy as np
import pandas as pd 
import streamlit as st
from streamlit_option_menu import option_menu
from streamlit_extras.add_vertical_space import add_vertical_space
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import LlamaCpp
from langchain.chains.question_answering import load_qa_chain
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import os

import warnings
warnings.filterwarnings('ignore')
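
# Note: the quantized GGUF model file is not bundled with the code. Download
# llama-2-7b-chat.Q4_K_M.gguf separately (quantized Llama 2 chat weights in
# this format are commonly published on Hugging Face) and place it under models/.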

def initialize_llm():
    """Initialize the local LLM model with optimized parameters for better performance"""
    try:
        model_path = "models/llama-2-7b-chat.Q4_K_M.gguf"
        if not os.path.exists(model_path):
            st.error(f"Model file not found at {model_path}")
            return None
            
        st.info("Loading LLM model... This may take a few moments.")
        llm = LlamaCpp(
            model_path=model_path,
            temperature=0.7,
            max_tokens=2000,
            top_p=0.9,
            verbose=True,
            n_ctx=2048,
            n_threads=4,
            n_batch=512,
            n_gpu_layers=0,
            f16_kv=True,
            seed=42
        )
        return llm
    except Exception as e:
        st.error(f"Error initializing LLM: {str(e)}")
        return None

def streamlit_config():
    st.set_page_config(page_title='Talent Track By AI', layout="wide")
    page_background_color = """
    <style>
    [data-testid="stHeader"] 
    {
    background: rgba(0,0,0,0);
    }
    </style>
    """
    st.markdown(page_background_color, unsafe_allow_html=True)
    st.markdown(f'<h1 style="text-align: center;">Talent Track By AI</h1>', unsafe_allow_html=True)

def process_resume(pdf):
    if pdf is not None:
        try:
            with st.spinner('Processing...'):
                pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
                summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
                summary = resume_analyzer.local_llm(chunks=pdf_chunks, analyze=summary_prompt)
                if summary:
                    st.session_state['resume_data'] = {
                        'pdf': pdf,
                        'chunks': pdf_chunks,
                        'summary': summary
                    }
                    return True
        except Exception as e:
            st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
    return False

class resume_analyzer:
    def pdf_to_chunks(pdf):
        pdf_reader = PdfReader(pdf)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text() or ""  # extract_text() may return None for image-only pages
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=700,
            chunk_overlap=200,
            length_function=len)
        chunks = text_splitter.split_text(text=text)
        return chunks

    def local_llm(chunks, analyze):
        try:
            # Initialize embeddings with error handling
            st.info("Initializing embeddings...")
            embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
            
            # Create vector store with error handling
            st.info("Creating vector store...")
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=500,
                chunk_overlap=50,
                length_function=len
            )
            split_chunks = []
            for chunk in chunks:
                split_chunks.extend(text_splitter.split_text(chunk))
            
            vectorstores = FAISS.from_texts(split_chunks, embedding=embeddings)
            docs = vectorstores.similarity_search(query=analyze, k=3)
            
            # Get LLM instance
            st.info("Getting LLM instance...")
            llm = initialize_llm()
            if not llm:
                st.error("Failed to initialize LLM")
                return None

            # Create and run the question-answering chain over the retrieved chunks
            st.info("Running analysis...")
            chain = load_qa_chain(llm=llm, chain_type='stuff')
            response = chain.run(input_documents=docs, question=analyze)
            return response
        except Exception as e:
            st.error(f"Error in LLM processing: {str(e)}")
            return None

    def summary_prompt(query_with_chunks):
        query = f'''Provide a detailed summary of the resume below and finish with an overall conclusion.
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    {query_with_chunks}
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    '''
        return query

    def resume_summary():
        with st.form(key='Summary'):
            add_vertical_space(1)
            if 'resume_data' not in st.session_state:
                pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
                add_vertical_space(2)
                submit = st.form_submit_button(label='Submit')
                add_vertical_space(1)
            else:
                st.info("Using previously uploaded resume")
                submit = st.form_submit_button(label='Analyze Again')
                add_vertical_space(1)

        add_vertical_space(3)
        if submit:
            if 'resume_data' not in st.session_state:
                if pdf is not None and process_resume(pdf):
                    st.markdown('<h4 style="color: orange;">Summary:</h4>', unsafe_allow_html=True)
                    st.write(st.session_state['resume_data']['summary'])
            else:
                st.markdown('<h4 style="color: orange;">Summary:</h4>', unsafe_allow_html=True)
                st.write(st.session_state['resume_data']['summary'])

    def strength_prompt(query_with_chunks):
        query = f'''Provide a detailed analysis of the strengths of the resume below, explain each one, and finish with an overall conclusion.
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    {query_with_chunks}
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    '''
        return query

    def resume_strength():
        with st.form(key='Strength'):
            add_vertical_space(1)
            if 'resume_data' not in st.session_state:
                pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
                add_vertical_space(2)
                submit = st.form_submit_button(label='Submit')
                add_vertical_space(1)
            else:
                st.info("Using previously uploaded resume")
                submit = st.form_submit_button(label='Analyze Again')
                add_vertical_space(1)

        add_vertical_space(3)
        if submit:
            if 'resume_data' not in st.session_state:
                if pdf is not None and process_resume(pdf):
                    strength_prompt = resume_analyzer.strength_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                    strength = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=strength_prompt)
                    if strength:
                        st.markdown('<h4 style="color: orange;">Strength:</h4>', unsafe_allow_html=True)
                        st.write(strength)
            else:
                strength_prompt = resume_analyzer.strength_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                strength = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=strength_prompt)
                if strength:
                    st.markdown('<h4 style="color: orange;">Strength:</h4>', unsafe_allow_html=True)
                    st.write(strength)

    def weakness_prompt(query_with_chunks):
        query = f'''Provide a detailed analysis of the weaknesses of the resume below and explain how to improve it into a stronger resume.
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    {query_with_chunks}
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    '''
        return query

    def resume_weakness():
        with st.form(key='Weakness'):
            add_vertical_space(1)
            if 'resume_data' not in st.session_state:
                pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
                add_vertical_space(2)
                submit = st.form_submit_button(label='Submit')
                add_vertical_space(1)
            else:
                st.info("Using previously uploaded resume")
                submit = st.form_submit_button(label='Analyze Again')
                add_vertical_space(1)

        add_vertical_space(3)
        if submit:
            if 'resume_data' not in st.session_state:
                if pdf is not None and process_resume(pdf):
                    weakness_prompt = resume_analyzer.weakness_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                    weakness = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=weakness_prompt)
                    if weakness:
                        st.markdown('<h4 style="color: orange;">Weakness and Suggestions:</h4>', unsafe_allow_html=True)
                        st.write(weakness)
            else:
                weakness_prompt = resume_analyzer.weakness_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                weakness = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=weakness_prompt)
                if weakness:
                    st.markdown('<h4 style="color: orange;">Weakness and Suggestions:</h4>', unsafe_allow_html=True)
                    st.write(weakness)

    def job_title_prompt(query_with_chunks):
        query = f'''What job roles on LinkedIn should I apply for, based on the resume below?
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    {query_with_chunks}
                    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
                    '''
        return query

    def job_title_suggestion():
        with st.form(key='Job Titles'):
            add_vertical_space(1)
            if 'resume_data' not in st.session_state:
                pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
                add_vertical_space(2)
                submit = st.form_submit_button(label='Submit')
                add_vertical_space(1)
            else:
                st.info("Using previously uploaded resume")
                submit = st.form_submit_button(label='Analyze Again')
                add_vertical_space(1)

        add_vertical_space(3)
        if submit:
            if 'resume_data' not in st.session_state:
                if pdf is not None and process_resume(pdf):
                    job_title_prompt = resume_analyzer.job_title_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                    job_title = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=job_title_prompt)
                    if job_title:
                        st.markdown('<h4 style="color: orange;">Job Titles:</h4>', unsafe_allow_html=True)
                        st.write(job_title)
            else:
                job_title_prompt = resume_analyzer.job_title_prompt(query_with_chunks=st.session_state['resume_data']['summary'])
                job_title = resume_analyzer.local_llm(chunks=st.session_state['resume_data']['chunks'], analyze=job_title_prompt)
                if job_title:
                    st.markdown('<h4 style="color: orange;">Job Titles:</h4>', unsafe_allow_html=True)
                    st.write(job_title)

class linkedin_scraper:
    @staticmethod
    def webdriver_setup():
        """Set up Chrome webdriver with enhanced anti-detection measures"""
        try:
            options = webdriver.ChromeOptions()

            # Basic options
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--disable-gpu')
            options.add_argument('--disable-extensions')
            options.add_argument('--disable-notifications')
            
            # Window size and display
            options.add_argument('--window-size=1920,1080')
            options.add_argument('--start-maximized')
            
            # Enhanced privacy and security settings
            options.add_argument('--disable-blink-features=AutomationControlled')
            options.add_argument('--disable-web-security')
            options.add_argument('--allow-running-insecure-content')
            options.add_argument('--ignore-certificate-errors')
            options.add_argument('--ignore-ssl-errors')
            
            # Random user agent
            user_agents = [
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
                'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Edge/120.0.0.0'
            ]
            user_agent = np.random.choice(user_agents)
            options.add_argument(f'--user-agent={user_agent}')
            
            # Experimental options
            options.add_experimental_option('excludeSwitches', ['enable-automation', 'enable-logging'])
            options.add_experimental_option('useAutomationExtension', False)
            
            # Create driver
            driver = webdriver.Chrome(options=options)
            
            # Additional JavaScript to avoid detection
            driver.execute_cdp_cmd('Network.setUserAgentOverride', {"userAgent": user_agent})
            
            # Modify navigator properties
            driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
            driver.execute_script("Object.defineProperty(navigator, 'languages', {get: () => ['en-US', 'en']})")
            driver.execute_script("Object.defineProperty(navigator, 'plugins', {get: () => [1, 2, 3, 4, 5]})")
            
            # Set viewport and window size
            driver.execute_cdp_cmd('Emulation.setDeviceMetricsOverride', {
                'mobile': False,
                'width': 1920,
                'height': 1080,
                'deviceScaleFactor': 1,
            })
            
            return driver

        except Exception as e:
            st.error(f"Failed to initialize Chrome driver: {str(e)}")
            st.info("Please ensure Chrome browser is installed and updated to the latest version")
            return None

    @staticmethod
    def get_userinput():
        """Get job search parameters from user"""
        job_title = st.text_input('Enter Job Titles (comma separated):', 'Data Scientist')
        job_location = st.text_input('Enter Job Location:', 'India')
        job_count = st.number_input('Enter Number of Jobs to Scrape (max 100):', min_value=1, max_value=100, value=2)
        return job_title.split(','), job_location, job_count

    @staticmethod
    def build_url(job_title, job_location):
        """Build LinkedIn search URL"""
        formatted_title = '%20'.join(job_title[0].strip().split())  # Use first job title only
        formatted_location = '%20'.join(job_location.split())
        return f"https://www.linkedin.com/jobs/search?keywords={formatted_title}&location={formatted_location}"

    @staticmethod
    def scroll_page(driver, job_count):
        """Scroll page to load more jobs"""
        try:
            st.info("Scrolling page to load more jobs...")
            # Calculate number of scrolls needed (25 jobs per scroll approximately)
            scrolls = min(job_count // 25 + 1, 4)
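            # e.g. job_count=2 -> 1 scroll; job_count=100 -> capped at 4 scrolls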
            
            for i in range(scrolls):
                st.info(f"Scroll attempt {i+1}/{scrolls}")
                # Scroll to bottom
                driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(4)  # Wait for content to load
                
                try:
                    # Try to click a "Show more" button if present. find_elements
                    # takes a single selector string, so the candidates are joined
                    # into one comma-separated CSS selector group.
                    show_more_buttons = driver.find_elements(
                        by=By.CSS_SELECTOR,
                        value="button.infinite-scroller__show-more-button, "
                              "button.see-more-jobs, "
                              "button[data-tracking-control-name='infinite-scroller_show-more']"
                    )
                    
                    for button in show_more_buttons:
                        if button.is_displayed():
                            driver.execute_script("arguments[0].click();", button)
                            time.sleep(3)  # Wait for new content
                            break
                            
                except Exception as e:
                    st.warning(f"Could not find or click 'Show more' button: {str(e)}")
                    
                # Additional wait after last scroll
                if i == scrolls - 1:
                    time.sleep(5)
                    
        except Exception as e:
            st.warning(f"Error during page scrolling: {str(e)}")

    @staticmethod
    def scrape_jobs(driver, job_count):
        """Scrape job listings from LinkedIn with updated selectors"""
        jobs_data = {
            'company_name': [],
            'job_title': [],
            'location': [],
            'job_url': []
        }

        try:
            # Wait for job cards to load with explicit wait
            st.info("Waiting for page to load...")
            time.sleep(8)  # Generous initial wait so job cards can render
            
            # Try multiple selectors for job cards
            selectors = [
                "div.job-card-container",
                "li.jobs-search-results__list-item",
                "div.base-card",
                "div.job-search-card",
                "li.jobs-search-results-list__list-item"
            ]
            
            job_cards = []
            for selector in selectors:
                try:
                    job_cards = driver.find_elements(by=By.CSS_SELECTOR, value=selector)
                    if job_cards:
                        st.success(f"Found job cards using selector: {selector}")
                        break
                except:
                    continue
            
            if not job_cards:
                st.error("Could not find any job listings. LinkedIn might have updated their page structure.")
                return pd.DataFrame(jobs_data)

            # Limit to requested number
            job_cards = job_cards[:job_count]
            
            st.info(f"Processing {len(job_cards)} job cards...")
            
            for card in job_cards:
                try:
                    # Company name selectors
                    company_selectors = [
                        ".job-card-container__company-name",
                        ".base-search-card__subtitle",
                        ".company-name",
                        "span[data-tracking-control-name='public_jobs_company_name']",
                        ".job-card-container__primary-description"
                    ]
                    
                    # Job title selectors
                    title_selectors = [
                        ".job-card-container__title",
                        ".base-search-card__title",
                        ".job-card-list__title",
                        "h3.base-search-card__title",
                        ".job-search-card__title"
                    ]
                    
                    # Location selectors
                    location_selectors = [
                        ".job-card-container__metadata-item",
                        ".base-search-card__metadata",
                        ".job-search-card__location",
                        "span[data-tracking-control-name='public_jobs_job-location']",
                        ".job-card-container__metadata-wrapper"
                    ]
                    
                    # Try to find company name
                    company = None
                    for selector in company_selectors:
                        try:
                            element = card.find_element(by=By.CSS_SELECTOR, value=selector)
                            company = element.text.strip()
                            if company:
                                break
                        except:
                            continue
                    
                    # Try to find job title
                    title = None
                    for selector in title_selectors:
                        try:
                            element = card.find_element(by=By.CSS_SELECTOR, value=selector)
                            title = element.text.strip()
                            if title:
                                break
                        except:
                            continue
                    
                    # Try to find location
                    location = None
                    for selector in location_selectors:
                        try:
                            element = card.find_element(by=By.CSS_SELECTOR, value=selector)
                            location = element.text.strip()
                            if location:
                                break
                        except:
                            continue

                    # Try to find URL
                    try:
                        url = card.find_element(by=By.CSS_SELECTOR, value="a").get_attribute("href")
                    except:
                        try:
                            url = card.find_element(by=By.CSS_SELECTOR, value="a.base-card__full-link").get_attribute("href")
                        except:
                            url = None
                    
                    if all([company, title, location, url]):
                        jobs_data['company_name'].append(company)
                        jobs_data['job_title'].append(title)
                        jobs_data['location'].append(location)
                        jobs_data['job_url'].append(url)
                        st.success(f"Successfully scraped job: {title} at {company}")
                    
                except Exception as e:
                    st.warning(f"Failed to scrape a job card: {str(e)}")
                    continue

            if not jobs_data['company_name']:
                st.error("Could not extract any job information. LinkedIn might be blocking automated access.")
                
        except Exception as e:
            st.error(f"Error during job scraping: {str(e)}")

        return pd.DataFrame(jobs_data)

    @staticmethod
    def display_results(df):
        """Display scraped job results"""
        if df.empty:
            st.error("No jobs were found. Please try again with different search parameters.")
            return

        st.markdown('### 📊 Scraped Job Listings')
        
        # Display summary statistics
        st.markdown(f"**Total Jobs Found:** {len(df)}")
        st.markdown(f"**Unique Companies:** {df['company_name'].nunique()}")
        st.markdown(f"**Locations Covered:** {df['location'].nunique()}")
        
        # Display the dataframe
        st.dataframe(df)
        
        # Add download button
        csv = df.to_csv(index=False).encode('utf-8')
        st.download_button(
            "Download Results as CSV",
            csv,
            "linkedin_jobs.csv",
            "text/csv",
            key='download-csv'
        )

    @staticmethod
    def main():
        st.markdown('## 🔍 LinkedIn Job Search')
        
        job_titles, job_location, job_count = linkedin_scraper.get_userinput()
        
        if st.button('Start Scraping'):
            with st.spinner('Scraping LinkedIn jobs...'):
                try:
                    driver = linkedin_scraper.webdriver_setup()
                    if driver is None:
                        return
                        
                    url = linkedin_scraper.build_url(job_titles, job_location)
                    st.info(f"Searching: {url}")
                    
                    driver.get(url)
                    time.sleep(5)  # Initial wait for the search page to load
                    
                    linkedin_scraper.scroll_page(driver, job_count)
                    df = linkedin_scraper.scrape_jobs(driver, job_count)
                    
                    driver.quit()
                    
                    if not df.empty:
                        linkedin_scraper.display_results(df)
                    else:
                        st.error('No jobs found matching your criteria. Try different search terms or location.')
                        
                except Exception as e:
                    st.error(f'An error occurred while scraping: {str(e)}')
                    if 'driver' in locals() and driver is not None:
                        driver.quit()

class career_chatbot:
    def initialize_session_state():
        # Initialize session state variables for the chatbot
        if "messages" not in st.session_state:
            st.session_state.messages = [
                {"role": "assistant", "content": "I'm your Career & Resume Assistant! Ask me anything about job searching, resume writing, interview preparation, or career development."}
            ]
        
        if "conversation_memory" not in st.session_state:
            st.session_state.conversation_memory = ConversationBufferMemory(return_messages=True)
        
        if "resume_data" not in st.session_state:
            st.session_state.resume_data = None
    
    def setup_chatbot_ui():
        with st.container():
            st.markdown(f'<h3 style="color: orange; text-align: center;">Career Advisor Chatbot</h3>', unsafe_allow_html=True)
            
            # Option to upload resume to provide context for the chatbot
            with st.expander("Upload Resume for Context (Optional)"):
                pdf = st.file_uploader(label='Upload Resume', type='pdf', key="chatbot_resume")
                if pdf is not None and st.button("Process Resume"):
                    with st.spinner('Processing resume for context...'):
                        try:
                            pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
                            summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
                            summary = resume_analyzer.local_llm(chunks=pdf_chunks, analyze=summary_prompt)
                            if summary:
                                st.session_state.resume_data = summary
                                st.success("Resume processed successfully! The chatbot now has context from your resume.")
                        except Exception as e:
                            st.error(f"Error processing resume: {e}")
            
            # Display chat messages
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.write(message["content"])
    
    def create_system_prompt():
        base_prompt = """You are a specialized career and job-search assistant. Your expertise is limited to:
1. Resume writing, analysis, and improvement
2. Job search strategies and techniques
3. Interview preparation and tips
4. Career development advice
5. LinkedIn profile optimization
6. Professional networking guidance
7. Salary negotiation tactics
8. Professional skill development recommendations

Answer questions ONLY related to these topics. For any off-topic questions, politely redirect the conversation back to career-related topics.
Your responses should be helpful, specific, and actionable. Use bullet points for clarity when appropriate.
"""
        
        # Add resume context if available
        if st.session_state.resume_data:
            resume_context = f"\nThe user has provided a resume with the following information:\n{st.session_state.resume_data}\n\nUse this context to provide personalized advice when relevant."
            return base_prompt + resume_context
        else:
            return base_prompt
    
    def process_user_input():
        # Get user input and clear the input box
        user_input = st.chat_input("Ask me about careers, job search, or resume advice...")
        
        if user_input:
            # Add user message to chat history
            st.session_state.messages.append({"role": "user", "content": user_input})
            
            # Display user message
            with st.chat_message("user"):
                st.write(user_input)
            
            # Generate response using the chatbot
            try:
                with st.spinner("Thinking..."):
                    llm = initialize_llm()
                    if not llm:
                        raise Exception("Failed to initialize LLM")
                    
                    # Update conversation memory
                    st.session_state.conversation_memory.chat_memory.add_user_message(user_input)
                    
                    system_prompt = career_chatbot.create_system_prompt()
                    chat_history = st.session_state.conversation_memory.buffer
                    
                    # Format prompt with system instructions and context
                    prompt = f"""
                    {system_prompt}
                    
                    Chat History: {chat_history}
                    
                    Human: {user_input}
                    Assistant:"""
                    
                    response = llm.predict(prompt)
                    
                    # Add assistant response to memory
                    st.session_state.conversation_memory.chat_memory.add_ai_message(response)
                    
                    # Add assistant response to chat history
                    st.session_state.messages.append({"role": "assistant", "content": response})
                    
                    # Display assistant response
                    with st.chat_message("assistant"):
                        st.write(response)
            
            except Exception as e:
                error_msg = f"Error generating response: {str(e)}"
                st.error(error_msg)
                st.session_state.messages.append({"role": "assistant", "content": "I'm sorry, I encountered an error. Please try again."})
    
    def main():
        career_chatbot.initialize_session_state()
        career_chatbot.setup_chatbot_ui()
        career_chatbot.process_user_input()

# Streamlit Configuration Setup
streamlit_config()
add_vertical_space(2)

with st.sidebar:
    add_vertical_space(4)
    option = option_menu(menu_title='', options=['Summary', 'Strength', 'Weakness', 'Job Titles', 'Linkedin Jobs', 'Career Chat'],
                         icons=['house-fill', 'database-fill', 'pass-fill', 'list-ul', 'linkedin', 'chat-dots-fill'])

if option == 'Summary':
    resume_analyzer.resume_summary()
elif option == 'Strength':
    resume_analyzer.resume_strength()
elif option == 'Weakness':
    resume_analyzer.resume_weakness()
elif option == 'Job Titles':
    resume_analyzer.job_title_suggestion()
elif option == 'Linkedin Jobs':
    linkedin_scraper.main()
elif option == 'Career Chat':
    career_chatbot.main()