#!/usr/bin/env python3
"""
Code Completion Benchmark for CodeReality-1T Dataset
Evaluates code completion models using Pass@k metrics
"""

import json
import os
import re
import random
from typing import Dict, List, Tuple
from collections import defaultdict

def load_dataset_sample(data_dir: str, sample_size: int = 200) -> List[Dict]:
    """Load sample of repositories with code files."""
    print(f"🔍 Loading sample of {sample_size} repositories with code files...")

    repositories = []
    files = [f for f in os.listdir(data_dir) if f.endswith('.jsonl')]
    random.shuffle(files)

    for filename in files[:15]:  # Sample from up to 15 randomly ordered files
        file_path = os.path.join(data_dir, filename)
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for line in f:
                    if len(repositories) >= sample_size:
                        break
                    try:
                        repo_data = json.loads(line)
                        # Filter repositories with code files
                        if has_code_files(repo_data):
                            repositories.append(repo_data)
                    except json.JSONDecodeError:
                        continue
        except Exception:
            # Skip files that cannot be opened or read
            continue

        if len(repositories) >= sample_size:
            break

    print(f"✅ Loaded {len(repositories)} repositories with code files")
    return repositories

def has_code_files(repo: Dict) -> bool:
    """Check if repository contains code files."""
    code_extensions = {'.py', '.js', '.java', '.cpp', '.c', '.go', '.rs', '.ts'}

    files = repo.get('files', [])
    for file_obj in files:
        if isinstance(file_obj, dict):
            file_path = file_obj.get('path', '')
            if any(file_path.endswith(ext) for ext in code_extensions):
                return True
    return False
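
# Note on the expected record shape: each JSONL line should decode to a
# repository dict exposing a "files" list of {"path": ..., "content": ...}
# entries; that is the only structure this benchmark relies on, and any
# other repository fields are ignored.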

def extract_function_snippets(repo: Dict, language: str = 'python') -> List[Dict]:
    """Extract function definitions for completion tasks."""
    snippets = []

    # Language-specific patterns
    patterns = {
        'python': r'def\s+(\w+)\s*\([^)]*\):\s*',
        'javascript': r'function\s+(\w+)\s*\([^)]*\)\s*{',
        'java': r'(?:public|private|protected)?\s*(?:static)?\s*\w+\s+(\w+)\s*\([^)]*\)\s*{',
        'cpp': r'\w+\s+(\w+)\s*\([^)]*\)\s*{',
    }

    if language not in patterns:
        return snippets

    pattern = patterns[language]
    extension_map = {
        'python': '.py',
        'javascript': '.js',
        'java': '.java',
        'cpp': '.cpp'
    }

    target_ext = extension_map[language]

    files = repo.get('files', [])
    for file_obj in files:
        if isinstance(file_obj, dict):
            file_path = file_obj.get('path', '')
            content = file_obj.get('content', '')

            if file_path.endswith(target_ext) and content:
                matches = list(re.finditer(pattern, content, re.MULTILINE))

                for match in matches:
                    start_pos = match.start()
                    function_name = match.group(1)

                    # Get context before function
                    lines_before = content[:start_pos].split('\n')
                    context_lines = lines_before[-5:]  # slicing already copes with fewer than five lines
                    context = '\n'.join(context_lines)

                    # Get function body (simplified - until next function or end)
                    remaining_content = content[start_pos:]
                    lines = remaining_content.split('\n')

                    function_lines = []
                    indent_level = None

                    for i, line in enumerate(lines):
                        if i == 0:
                            function_lines.append(line)
                            continue

                        # Detect indentation level
                        if indent_level is None and line.strip():
                            indent_level = len(line) - len(line.lstrip())

                        # Stop when indentation drops below the body level (end of function);
                        # the keyword check keeps dedented control-flow continuations attached
                        if line.strip() and indent_level is not None:
                            current_indent = len(line) - len(line.lstrip())
                            if current_indent < indent_level and not line.strip().startswith(('if', 'for', 'while', 'try', 'except', 'else', 'elif')):
                                break

                        function_lines.append(line)

                        # Limit function length
                        if len(function_lines) > 20:
                            break

                    function_body = '\n'.join(function_lines)

                    # Create completion task: provide function signature, expect body
                    if len(function_lines) > 3:  # Only meaningful functions
                        snippets.append({
                            'function_name': function_name,
                            'context': context,
                            'prompt': function_lines[0],  # Function signature
                            'completion': '\n'.join(function_lines[1:]),  # Function body
                            'file_path': file_path,
                            'language': language
                        })

    return snippets
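
# Illustrative output (hypothetical input file): a function like
#     def scale(values, factor):
#         result = []
#         for v in values:
#             result.append(v * factor)
#         return result
# yields a snippet whose 'prompt' is the signature line and whose
# 'completion' is the indented body, tagged with file path and language.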

def simple_code_completion_model(prompt: str, language: str) -> List[str]:
    """Simple rule-based code completion for demonstration."""
    completions = []

    # Generate multiple completions (for Pass@k evaluation)
    templates = {
        'python': [
            "    pass",
            "    return None",
            "    # TODO: implement this function\n    pass",
            "    result = None\n    return result",
            "    # Implementation needed\n    raise NotImplementedError()"
        ],
        'javascript': [
            "    return null;",
            "    // TODO: implement\n    return;",
            "    throw new Error('Not implemented');",
            "    var result = null;\n    return result;",
            "    console.log('Function called');\n    return;"
        ],
        'java': [
            "    return null;",
            "    // TODO: implement this method\n    return null;",
            "    throw new UnsupportedOperationException();",
            "    Object result = null;\n    return result;",
            "    System.out.println(\"Method called\");\n    return null;"
        ]
    }

    if language in templates:
        # Return multiple variations for Pass@k evaluation
        return templates[language]
    else:
        return ["// TODO: implement"]

def evaluate_completion_quality(predicted: str, actual: str) -> float:
    """Simple evaluation of completion quality."""
    # Normalize strings
    pred_lines = [line.strip() for line in predicted.split('\n') if line.strip()]
    actual_lines = [line.strip() for line in actual.split('\n') if line.strip()]

    if not actual_lines:
        return 0.0

    # Check for basic structural similarity
    score = 0.0

    # Check if both are empty implementations
    empty_indicators = ['pass', 'todo', 'not implemented', 'null', 'return;', 'return null']
    pred_empty = any(indicator in predicted.lower() for indicator in empty_indicators)
    actual_empty = any(indicator in actual.lower() for indicator in empty_indicators)

    if pred_empty and actual_empty:
        score += 0.8
    elif not pred_empty and not actual_empty:
        # Check for keyword similarity
        pred_keywords = set(re.findall(r'\b\w+\b', predicted.lower()))
        actual_keywords = set(re.findall(r'\b\w+\b', actual.lower()))

        if actual_keywords:
            keyword_overlap = len(pred_keywords & actual_keywords) / len(actual_keywords)
            score += keyword_overlap * 0.6

        # Check for similar line count
        line_ratio = min(len(pred_lines), len(actual_lines)) / max(len(pred_lines), len(actual_lines))
        score += line_ratio * 0.4

    return min(score, 1.0)
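
# Worked example: evaluate_completion_quality("    pass", "    pass") scores
# 0.8, since both sides match an empty-implementation indicator; two
# non-empty bodies are instead scored as keyword overlap (weight 0.6) plus
# line-count ratio (weight 0.4), capped at 1.0.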

def calculate_pass_at_k(completion_results: List[Tuple[List[str], str]], k: int = 1) -> float:
    """Calculate Pass@k metric."""
    if k <= 0:
        return 0.0

    total_passed = 0

    for completions, ground_truth in completion_results:
        # Take top k completions
        top_k_completions = completions[:k]

        # Check if any completion passes
        passed = False
        for completion in top_k_completions:
            quality_score = evaluate_completion_quality(completion, ground_truth)
            if quality_score > 0.5:  # Threshold for "passing"
                passed = True
                break

        if passed:
            total_passed += 1

    return total_passed / len(completion_results) if completion_results else 0.0
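
def unbiased_pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased Pass@k estimator from Chen et al. (2021), "Evaluating Large
    Language Models Trained on Code": given n sampled completions of which
    c pass, estimate the probability that at least one of k samples passes,

        pass@k = 1 - C(n - c, k) / C(n, k).

    Provided as an optional, more standard alternative to the simplified
    top-k threshold check in calculate_pass_at_k; nothing in this script
    calls it by default.
    """
    if n - c < k:
        return 1.0
    # Numerically stable product form of 1 - C(n - c, k) / C(n, k)
    estimate = 1.0
    for i in range(n - c + 1, n + 1):
        estimate *= 1.0 - k / i
    return 1.0 - estimate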

def run_completion_benchmark(repositories: List[Dict]) -> Dict:
    """Run code completion benchmark."""
    print("🧮 Running code completion benchmark...")

    results = {
        'total_repositories': len(repositories),
        'completion_tasks': [],
        'language_stats': defaultdict(int),
        'pass_at_1': 0.0,
        'pass_at_3': 0.0,
        'pass_at_5': 0.0,
        'average_quality': 0.0
    }

    completion_results = []
    quality_scores = []

    # Extract function snippets from repositories
    for repo in repositories:
        for language in ['python', 'javascript', 'java']:
            snippets = extract_function_snippets(repo, language)

            for snippet in snippets[:2]:  # Limit per repo for performance
                results['language_stats'][language] += 1

                # Generate completions
                completions = simple_code_completion_model(snippet['prompt'], language)
                ground_truth = snippet['completion']

                completion_results.append((completions, ground_truth))

                # Calculate quality for first completion
                if completions:
                    quality = evaluate_completion_quality(completions[0], ground_truth)
                    quality_scores.append(quality)

                results['completion_tasks'].append({
                    'function_name': snippet['function_name'],
                    'language': language,
                    'prompt_length': len(snippet['prompt']),
                    'completion_length': len(ground_truth)
                })

    # Calculate metrics
    results['pass_at_1'] = calculate_pass_at_k(completion_results, 1)
    results['pass_at_3'] = calculate_pass_at_k(completion_results, 3)
    results['pass_at_5'] = calculate_pass_at_k(completion_results, 5)
    results['average_quality'] = sum(quality_scores) / len(quality_scores) if quality_scores else 0.0

    return results

def print_benchmark_results(results: Dict):
    """Print formatted benchmark results."""
    print("=" * 60)
    print("🎯 CODE COMPLETION BENCHMARK RESULTS")
    print("=" * 60)

    print(f"Total repositories: {results['total_repositories']}")
    print(f"Completion tasks: {len(results['completion_tasks'])}")

    print(f"\n📊 Pass@k Metrics:")
    print(f"  Pass@1: {results['pass_at_1']:.3f}")
    print(f"  Pass@3: {results['pass_at_3']:.3f}")
    print(f"  Pass@5: {results['pass_at_5']:.3f}")
    print(f"  Average Quality: {results['average_quality']:.3f}")

    print(f"\n🔤 Language Distribution:")
    for language, count in sorted(results['language_stats'].items(), key=lambda x: x[1], reverse=True):
        print(f"  {language}: {count} functions")

    print(f"\n💡 Insights:")
    print("- This is a simplified demonstration benchmark")
    print("- Real evaluation requires more sophisticated code execution")
    print("- CodeReality-1T provides diverse, noisy code for robust testing")
    print("- Consider functional correctness testing for production models")

def main():
    """Run code completion benchmark."""
    print("🚀 CodeReality-1T Code Completion Benchmark")
    print("=" * 60)

    # Configuration
    data_dir = "/mnt/z/CodeReality_Final/unified_dataset"
    sample_size = 100

    if not os.path.exists(data_dir):
        print(f"❌ Dataset directory not found: {data_dir}")
        print("Please update the data_dir path to point to your CodeReality-1T dataset")
        return

    # Load dataset sample
    repositories = load_dataset_sample(data_dir, sample_size)

    if not repositories:
        print("❌ No repositories loaded. Check dataset path.")
        return

    # Run benchmark
    results = run_completion_benchmark(repositories)

    # Print results
    print_benchmark_results(results)

    # Save results
    output_file = "code_completion_results.json"
    with open(output_file, 'w') as f:
        # Convert defaultdict to regular dict for JSON serialization
        results_json = {
            'total_repositories': results['total_repositories'],
            'completion_tasks': results['completion_tasks'],
            'language_stats': dict(results['language_stats']),
            'pass_at_1': results['pass_at_1'],
            'pass_at_3': results['pass_at_3'],
            'pass_at_5': results['pass_at_5'],
            'average_quality': results['average_quality']
        }
        json.dump(results_json, f, indent=2)

    print(f"\n💾 Results saved to: {output_file}")

if __name__ == "__main__":
    main()