File size: 6,248 Bytes
9b5934a
 
514b248
 
 
e6842e0
 
 
 
514b248
 
e6842e0
fee88d4
 
f9ed690
e6842e0
 
514b248
 
e6842e0
b640c63
e6842e0
 
 
 
 
 
 
 
 
 
fee88d4
 
e6842e0
 
 
fee88d4
 
e6842e0
 
 
fee88d4
 
e6842e0
 
 
 
 
 
 
 
 
 
 
 
514b248
 
 
e6842e0
514b248
e6842e0
 
 
 
 
514b248
e6842e0
 
514b248
e6842e0
514b248
e6842e0
 
514b248
e6842e0
514b248
e6842e0
 
514b248
e6842e0
 
514b248
e6842e0
 
514b248
b640c63
e6842e0
 
514b248
 
e6842e0
 
514b248
 
e6842e0
 
 
514b248
e6842e0
 
 
 
514b248
 
e6842e0
514b248
e6842e0
 
514b248
e6842e0
74b79b1
e6842e0
74b79b1
514b248
e6842e0
 
 
 
514b248
e6842e0
514b248
e6842e0
514b248
e6842e0
 
514b248
 
e6842e0
 
 
514b248
e6842e0
 
514b248
e6842e0
514b248
 
e6842e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
02925f0
e6842e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
281b21c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
# SPDX-FileCopyrightText: 2025 J. Manrique Lopez de la Fuente <[email protected]>
# SPDX-FileCopyrightText: 2025 Industria de Diseño Textil S.A. INDITEX
#
# SPDX-License-Identifier: Apache-2.0

import gradio as gr
from huggingface_hub import InferenceClient
import os

# --- Available Models ---
# 'Instruct'/'chat' fine-tunes are used because they are good at following
# instructions (the strict output format demanded by the prompt template).
# Keys are UI display names; values are Hugging Face Hub model IDs.
AVAILABLE_MODELS = {
    "Llama 3 8B Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
    "Gemma 2 9B Instruct": "google/gemma-2-9b-it",
    "Mistral 7B Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
}

# --- Prompt Template ---
# Instructions plus a fixed Markdown output template sent as a single user
# message. The {license_text} placeholder is filled in by analyze_license
# via str.format.

PROMPT_TEMPLATE = """
You are an expert on open-source software compliance and licensing.
Using **only** the license text provided below, create a concise Markdown
summary that complies with *all* the rules in this prompt.

## Output template (follow exactly)

# ⚖ <Full official license name>

<One-paragraph abstract — maximum 250 characters, including spaces>

## Permissions
* ✅ <permission 1 — ≤ 80 chars>
* ✅ <permission 2 — ≤ 80 chars>


## Limitations
* ❌ <limitation 1 — ≤ 80 chars>
* ❌ <limitation 2 — ≤ 80 chars>


## Conditions
* ℹ <condition 1 — ≤ 80 chars>
* ℹ <condition 2 — ≤ 80 chars>


## Formatting & content rules

1. **Headings, order, and emojis must match the template exactly.**
2. Lists: one item per line, max 10 items per list, no duplicates.
3. Each item **must** start with its emoji (✅ / ❌ / ℹ) followed by a single space.
4. Do not add or omit sections, headings, or blank lines.
5. Keep language clear and specific; avoid vague terms like “etc.”
6. Stay within every stated character limit; shorten items if needed.
7. Use plain ASCII characters only (no smart quotes, long dashes, or trademark symbols).

---

License text to analyze:
{license_text}

"""

# --- Main Analysis Function ---
def analyze_license(model_display_name, license_text):
    """
    Analyze a license text using the selected AI model.

    Args:
        model_display_name: Display-name key from AVAILABLE_MODELS.
        license_text: Raw license text to summarize.

    Returns:
        A Markdown summary string, or a user-facing message when the input
        is empty, the model name is unknown, or the API call fails.

    Raises:
        gr.Error: If the HUGGINGFACE_HUB_TOKEN secret is not configured.
    """
    # Reject empty or whitespace-only input early.
    if not license_text.strip():
        return "Paste license text in the text area, please."

    # The Inference API needs an access token, supplied via Space secrets.
    hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
    if not hf_token:
        raise gr.Error("HUGGINGFACE_HUB_TOKEN is not set in Space's secrets.")

    # Map the display name to the Hub model ID. Guard against an unknown
    # name (e.g. a stale UI value) instead of crashing with a KeyError.
    model_id = AVAILABLE_MODELS.get(model_display_name)
    if model_id is None:
        return f"Unknown model '{model_display_name}'. Choose one from the dropdown, please."

    # Client for the Hugging Face Inference API.
    client = InferenceClient(model=model_id, token=hf_token)

    # Build the final prompt by injecting the license text into the template.
    final_prompt = PROMPT_TEMPLATE.format(license_text=license_text)

    # Single-turn chat: all instructions travel in one user message.
    messages = [{"role": "user", "content": final_prompt}]

    try:
        # chat_completion is the right endpoint for 'instruct'/'chat' models.
        response = client.chat_completion(
            messages=messages,
            max_tokens=1024,  # enough headroom for the formatted output
            temperature=0.1,  # low temperature for more predictable replies
            stream=False,
        )

        # Extract and return the reply content.
        markdown_summary = response.choices[0].message.content
        return markdown_summary

    except Exception as e:
        # Boundary handler: surface the error in the UI, degrade gracefully.
        error_message = f"Error connecting to model '{model_display_name}': {e}"
        gr.Warning(error_message)
        return "Sorry, there has been an issue generating the response. Try again or choose a different model, please."


# --- Gradio UI ---
# Two-column layout: model picker + license input on the left, the
# generated Markdown brief on the right.
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("# License 🐱 Analyzer")
    gr.Markdown(
        "Paste the text of a software license, choose an AI model, and get its clear and structured brief. "
        "Brief quality will depend on model's capacity to follow instructions."
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Model & License")
            model_selector = gr.Dropdown(
                label="Select an AI Model",
                choices=list(AVAILABLE_MODELS.keys()),
                value=list(AVAILABLE_MODELS.keys())[0], # Default value: first model
            )
            license_input = gr.Textbox(
                label="License Text",
                placeholder="Paste full license text here, please.",
                lines=20,
                max_lines=50,
            )
            analyze_button = gr.Button("Analyze License", variant="primary")

        with gr.Column(scale=1):
            gr.Markdown("## ✨ Generated Brief")
            markdown_output = gr.Markdown(
                label="Markdown formatted brief",
                value="Brief will appear here ..."
            )

    # Wire the button to the analysis function.
    analyze_button.click(
        fn=analyze_license,
        inputs=[model_selector, license_input],
        outputs=[markdown_output],
    )

    # Pre-filled example (MIT license text) so users can try the app in one
    # click; cache_examples=True precomputes and stores the example's result.
    gr.Examples(
        examples=[
            [
                list(AVAILABLE_MODELS.keys())[0],
                """MIT License

Copyright (c) 2025 Jane Doe

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
            ],
        ],
        inputs=[model_selector, license_input],
        outputs=markdown_output,
        fn=analyze_license,
        cache_examples=True
    )


# Standard script entry point: launch the app when run directly.
if __name__ == "__main__":
    app.launch()