# SPDX-FileCopyrightText: 2025 J. Manrique Lopez de la Fuente
#
# SPDX-License-Identifier: Apache-2.0

import gradio as gr
from huggingface_hub import InferenceClient
import os
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- Available Models ---
# We use 'instruct' or 'chat' models because they are good at following instructions
AVAILABLE_MODELS = {
    "Qwen3": "Qwen/Qwen3-4B-Instruct-2507",
    "Gemma 2 9B Instruct": "google/gemma-2-9b-it",
    "Llama 3 8B Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Llama 3.1 8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
    "Llama 3.2 3B Instruct": "meta-llama/Llama-3.2-3B-Instruct",
    "Mistral 7B Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
}

# --- Prompt Template ---
PROMPT_TEMPLATE = """
You are an expert on open-source software compliance and licensing.
Using **only** the license text provided below, create a concise Markdown summary
that complies with *all* the rules in this prompt.

## Output template (follow exactly)

# ⚖

## Permissions
* ✅
* ✅ …

## Limitations
* ❌
* ❌ …

## Conditions
* ℹ
* ℹ …

## Formatting & content rules
1. **Headings, order, and emojis must match the template exactly.**
2. Lists: one item per line, max 10 items per list, no duplicates.
3. Each item **must** start with its emoji (✅ / ❌ / ℹ) followed by a single space.
4. Do not add or omit sections, headings, or blank lines.
5. Keep language clear and specific; avoid vague terms like "etc."
6. Stay within every stated character limit; shorten items if needed.
7. Apart from the required emojis, use plain ASCII characters only (no smart quotes, long dashes, or trademark symbols).

---
License text to analyze:

{license_text}
"""


# --- Analysis Main Function ---
def analyze_license(model_display_name, license_text):
    """
    Analyze the license text using the selected AI model.
    """
    logging.info(f"Analyzing license with model: {model_display_name}")

    if not license_text.strip():
        logging.warning("License text is empty.")
        return "Paste license text in the text area, please."

    # Get the Hugging Face token
    hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
    if not hf_token:
        logging.error("HUGGINGFACE_HUB_TOKEN is not set.")
        raise gr.Error("HUGGINGFACE_HUB_TOKEN is not set in the Space's secrets.")

    # Get the model ID
    model_id = AVAILABLE_MODELS[model_display_name]

    # Initialize the Hugging Face Inference API client
    client = InferenceClient(model=model_id, token=hf_token)

    # Build the final prompt
    final_prompt = PROMPT_TEMPLATE.format(license_text=license_text)

    # Chat messages sent to the model
    messages = [{"role": "user", "content": final_prompt}]

    try:
        logging.info(f"Calling Hugging Face API with model ID: {model_id}")
        # API call
        # Using chat_completion, ideal for 'instruct' models
        response = client.chat_completion(
            messages=messages,
            max_tokens=1024,  # Enough for the formatted output
            temperature=0.1,  # Low temperature for more predictable replies
            stream=False,
        )

        # Extract and return the reply content
        markdown_summary = response.choices[0].message.content
        logging.info("Successfully received response from API.")
        return markdown_summary

    except Exception as e:
        # API error handling
        error_message = f"Error connecting to model '{model_display_name}'"
        logging.error(error_message, exc_info=True)  # exc_info=True logs the full traceback
        gr.Warning(f"{error_message}: {e}")  # Keep the user-facing warning concise
        return "Sorry, there has been an issue generating the response. Try again or choose a different model, please."
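
# --- Optional helper (illustrative sketch, not wired into the UI) ---
# A minimal post-check for a generated brief, assuming the section headings
# defined in PROMPT_TEMPLATE above; the constant and function name below are
# illustrative additions, not part of the original app.
REQUIRED_HEADINGS = ("## Permissions", "## Limitations", "## Conditions")


def looks_like_valid_brief(markdown_summary: str) -> bool:
    """Return True if the brief contains every required section heading."""
    return all(heading in markdown_summary for heading in REQUIRED_HEADINGS)
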
# --- Gradio UI ---
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# License 🐱 Analyzer")
    gr.Markdown(
        "Paste the text of a software license, choose an AI model, and get a clear and structured brief. "
        "Brief quality will depend on the model's capacity to follow instructions."
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Model & License")
            model_selector = gr.Dropdown(
                label="Select an AI Model",
                choices=list(AVAILABLE_MODELS.keys()),
                value=list(AVAILABLE_MODELS.keys())[0],  # Default value
            )
            license_input = gr.Textbox(
                label="License Text",
                placeholder="Paste full license text here, please.",
                lines=20,
                max_lines=50,
            )
            analyze_button = gr.Button("Analyze License", variant="primary")

        with gr.Column(scale=1):
            gr.Markdown("## ✨ Generated Brief")
            markdown_output = gr.Markdown(
                label="Markdown formatted brief",
                value="Brief will appear here ..."
            )

    # Connect the button to the analysis function
    analyze_button.click(
        fn=analyze_license,
        inputs=[model_selector, license_input],
        outputs=[markdown_output],
    )

    gr.Examples(
        examples=[
            [
                list(AVAILABLE_MODELS.keys())[0],
                """MIT License

Copyright (c) 2025 Jane Doe

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
            ],
        ],
        inputs=[model_selector, license_input],
        outputs=markdown_output,
        fn=analyze_license,
        cache_examples=True  # Caching runs the example through analyze_license at startup, so the HF token must be available
    )

if __name__ == "__main__":
    app.launch()
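
# Usage sketch (assumed local setup; the file name "app.py" is an assumption):
#   export HUGGINGFACE_HUB_TOKEN=<your token>
#   python app.py
# Gradio prints the local URL to open in a browser once the app has launched.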