Kuberwastaken committed
Commit edf8f4b · 1 Parent(s): ddb5158

Switched back to Resume Roasting

.gitattributes DELETED
@@ -1,35 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
.gradio/certificate.pem DELETED
@@ -1,31 +0,0 @@
- -----BEGIN CERTIFICATE-----
- MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
- TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
- cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
- WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
- ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
- MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
- h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
- 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
- A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
- T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
- B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
- B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
- KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
- OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
- jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
- qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
- rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
- HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
- hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
- ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
- 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
- NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
- ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
- TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
- jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
- oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
- 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
- mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
- emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
- -----END CERTIFICATE-----
 
README.md DELETED
@@ -1,18 +0,0 @@
- ---
- title: Resume Roaster
- emoji: ⚡
- colorFrom: pink
- colorTo: yellow
- sdk: gradio
- sdk_version: 5.15.0
- app_file: main.py
- pinned: false
- tags:
- - smolagents
- - agent
- - smolagent
- - tool
- - agent-course
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
__pycache__/app.cpython-310.pyc DELETED
Binary file (2.01 kB)
 
app.py DELETED
@@ -1,68 +0,0 @@
- from dotenv import load_dotenv
- import os
- from smolagents import CodeAgent, HfApiModel
- from smolagents.tools import Tool
- import yaml
-
- # Load environment variables from .env in the root
- load_dotenv()
-
- # Retrieve the Hugging Face token from the environment
- hf_token = os.getenv("HF_TOKEN")
-
- class FinalAnswerTool(Tool):
-     name = "final_answer"
-     description = "Use this tool to provide your final answer"
-     inputs = {
-         "answer": {
-             "type": "string",
-             "description": "The final answer to the problem"
-         }
-     }
-     output_type = "string"
-
-     def forward(self, answer: str) -> str:
-         return answer
-
- class LinkedInScraperTool(Tool):
-     name = "linkedin_scraper"
-     description = "Scrapes LinkedIn profiles to extract professional information"
-     inputs = {
-         "linkedin_url": {
-             "type": "string",
-             "description": "The URL of the LinkedIn profile"
-         }
-     }
-     output_type = "object"
-
-     def forward(self, linkedin_url: str):
-         # Dummy implementation; replace with actual scraping logic
-         return {
-             "experience": "10 years in industry",
-             "skills": "Python, AI",
-             "description": "Experienced professional with a robust background in technology."
-         }
-
- def create_agent():
-     final_answer = FinalAnswerTool()
-     linkedin_scraper = LinkedInScraperTool()
-
-     model = HfApiModel(
-         max_tokens=2096,
-         temperature=0.5,
-         model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-         custom_role_conversions=None,
-     )
-
-     with open("prompts.yaml", 'r') as stream:
-         prompt_templates = yaml.safe_load(stream)
-
-     agent = CodeAgent(
-         model=model,
-         tools=[linkedin_scraper, final_answer],
-         max_steps=6,
-         verbosity_level=1,
-         prompt_templates=prompt_templates
-     )
-
-     return agent
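
A minimal smoke test for the dummy LinkedInScraperTool defined in the deleted app.py above; it only echoes the hard-coded stub, so it needs no network access or HF token (the profile URL is illustrative):

from app import LinkedInScraperTool

tool = LinkedInScraperTool()
profile = tool.forward("https://www.linkedin.com/in/example-profile")  # illustrative URL
print(profile["skills"])  # -> "Python, AI" (hard-coded stub value)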
 
main.py DELETED
@@ -1,21 +0,0 @@
- import gradio as gr
- from app import create_agent
-
- def roast_profile(linkedin_url):
-     agent = create_agent()
-     response = agent.run(
-         f"Scrape this LinkedIn profile: {linkedin_url} and create a humorous but not mean-spirited roast based on their experience, skills, and description. Keep it professional and avoid personal attacks."
-     )
-     return response
-
- demo = gr.Interface(
-     fn=roast_profile,
-     inputs=gr.Textbox(label="LinkedIn Profile URL"),
-     outputs=gr.Textbox(label="Roast Result"),
-     title="LinkedIn Profile Roaster",
-     description="Enter a LinkedIn profile URL and get a humorous professional roast!",
-     examples=[["https://www.linkedin.com/in/example-profile"]]
- )
-
- if __name__ == "__main__":
-     demo.launch(share=True)
 
prompts.yaml DELETED
@@ -1,22 +0,0 @@
- system_prompt: |
-   You are a witty professional roaster who analyzes LinkedIn profiles.
-   Your job is to create humorous but not mean-spirited roasts based on people's professional experiences.
-   Focus on gentle teasing about common LinkedIn behaviors like:
-   - Overuse of buzzwords
-   - Lengthy job titles
-   - Humble brags
-   - Excessive use of emojis
-   - Connection collecting
-   Avoid personal attacks or inappropriate content.
-
- task_prompt: |
-   Using the provided LinkedIn profile information, create a humorous roast that:
-   1. References specific details from their profile
-   2. Keeps the tone light and professional
-   3. Focuses on common LinkedIn behaviors and professional quirks
-   4. Avoids mean-spirited or personal attacks
-   5. Would be appropriate to share in a professional setting
-
- final_answer:
-   pre_messages: "Here is your final roast:"
-   post_messages: ""
 
requirements.txt DELETED
@@ -1,6 +0,0 @@
- gradio
- langchain
- PyYAML
- PyMuPDF
- transformers
- smolagents
 
tools/LinkedInScraperTool.py DELETED
@@ -1,77 +0,0 @@
- from bs4 import BeautifulSoup
- import requests
- from typing import Dict
- from smolagents.tools import Tool
-
- class LinkedInScraperTool(Tool):
-     name = "linkedin_scraper"
-     description = "Scrapes LinkedIn profiles to extract professional information"
-     inputs = {"linkedin_url": str}
-     outputs = dict
-
-     def __call__(self, linkedin_url: str) -> dict:
-         try:
-             # Add headers to mimic a browser request
-             headers = {
-                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
-             }
-
-             response = requests.get(linkedin_url, headers=headers)
-             soup = BeautifulSoup(response.text, 'html.parser')
-
-             # Extract profile information
-             profile_data = {
-                 'name': self._extract_name(soup),
-                 'headline': self._extract_headline(soup),
-                 'about': self._extract_about(soup),
-                 'experience': self._extract_experience(soup),
-                 'education': self._extract_education(soup),
-                 'skills': self._extract_skills(soup)
-             }
-
-             return profile_data
-
-         except Exception as e:
-             return {"error": f"Failed to scrape profile: {str(e)}"}
-
-     def _extract_name(self, soup):
-         name_element = soup.find('h1', {'class': 'text-heading-xlarge'})
-         return name_element.text.strip() if name_element else "Name not found"
-
-     def _extract_headline(self, soup):
-         headline_element = soup.find('div', {'class': 'text-body-medium'})
-         return headline_element.text.strip() if headline_element else "Headline not found"
-
-     def _extract_about(self, soup):
-         about_element = soup.find('div', {'class': 'pv-about-section'})
-         return about_element.text.strip() if about_element else "About section not found"
-
-     def _extract_experience(self, soup):
-         experience_elements = soup.find_all('li', {'class': 'experience-item'})
-         experience = []
-         for exp in experience_elements:
-             title_element = exp.find('h3', {'class': 'experience-title'})
-             company_element = exp.find('p', {'class': 'experience-company'})
-             if title_element and company_element:
-                 experience.append({
-                     'title': title_element.text.strip(),
-                     'company': company_element.text.strip()
-                 })
-         return experience if experience else ["Experience not found"]
-
-     def _extract_education(self, soup):
-         education_elements = soup.find_all('li', {'class': 'education-item'})
-         education = []
-         for edu in education_elements:
-             school_element = edu.find('h3', {'class': 'education-school'})
-             degree_element = edu.find('p', {'class': 'education-degree'})
-             if school_element and degree_element:
-                 education.append({
-                     'school': school_element.text.strip(),
-                     'degree': degree_element.text.strip()
-                 })
-         return education if education else ["Education not found"]
-
-     def _extract_skills(self, soup):
-         skills_elements = soup.find_all('span', {'class': 'skill-name'})
-         return [skill.text.strip() for skill in skills_elements] if skills_elements else ["Skills not found"]
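
For reference, a minimal sketch of driving this (now deleted) tool directly; the URL is illustrative, and since LinkedIn serves full profile markup only to authenticated sessions, the error branch is the realistic outcome for most URLs:

from tools.LinkedInScraperTool import LinkedInScraperTool

scraper = LinkedInScraperTool()
# This class is invoked via __call__ (not forward()) and returns a dict,
# falling back to {"error": ...} when the request or parsing fails.
data = scraper("https://www.linkedin.com/in/example-profile")
print(data.get("error", data))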
 
tools/__pycache__/linkedin_tools.cpython-310.pyc DELETED
Binary file (3.19 kB)
 
tools/__pycache__/resume_tools.cpython-310.pyc DELETED
Binary file (2.3 kB)