mstz committed
Commit 92b31bc · 1 Parent(s): 645cdef

updated to datasets 4.*

Files changed (4)
  1. README.md +11 -10
  2. spambase.data +0 -0
  3. spambase.py +0 -127
  4. spambase/train.csv +0 -0
README.md CHANGED
@@ -1,19 +1,20 @@
 ---
-language:
-- en
+configs:
+- config_name: spambase
+  data_files:
+  - path: spambase/train.csv
+    split: train
+  default: true
+language: en
+license: cc
+pretty_name: Spambase
+size_categories: 1M<n<10M
 tags:
-- spambase
 - tabular_classification
 - binary_classification
-- UCI
-pretty_name: Spambase
-size_categories:
-- 1K<n<10K
+- multiclass_classification
 task_categories:
 - tabular-classification
-configs:
-- spambase
-license: cc
 ---
 # Spambase
 The [Spambase dataset](https://archive.ics.uci.edu/ml/datasets/Spambase) from the [UCI ML repository](https://archive.ics.uci.edu/ml/datasets).
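With this change the repository relies on the YAML `configs` block above instead of a loading script, so `datasets` reads `spambase/train.csv` directly. A minimal sketch of loading the dataset after this commit, assuming only what the frontmatter declares (repo `mstz/spambase`, config `spambase`, a single `train` split):

```python
from datasets import load_dataset

# The `configs` entry above points the default "spambase" config at
# spambase/train.csv with a single "train" split.
ds = load_dataset("mstz/spambase", "spambase", split="train")

print(ds)      # column names are taken from the CSV header
print(ds[0])   # first example as a dict of feature -> value
```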
spambase.data DELETED
The diff for this file is too large to render. See raw diff
 
spambase.py DELETED
@@ -1,127 +0,0 @@
-"""Spambase: A Census Dataset"""
-
-from typing import List
-
-import datasets
-
-import pandas
-
-
-VERSION = datasets.Version("1.0.0")
-
-DESCRIPTION = "Spambase dataset from the UCI ML repository."
-_HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Spambase"
-_URLS = ("https://archive.ics.uci.edu/ml/datasets/Spambase")
-_CITATION = """
-@misc{misc_spambase_94,
-  author = {Hopkins,Mark, Reeber,Erik, Forman,George & Suermondt,Jaap},
-  title = {{Spambase}},
-  year = {1999},
-  howpublished = {UCI Machine Learning Repository},
-  note = {{DOI}: \\url{10.24432/C53G6X}}
-}"""
-
-# Dataset info
-urls_per_split = {
-    "train": "https://huggingface.co/datasets/mstz/spambase/raw/main/spambase.data"
-}
-features_types_per_config = {
-    "spambase": {
-        "word_freq_make": datasets.Value("float64"),
-        "word_freq_address": datasets.Value("float64"),
-        "word_freq_all": datasets.Value("float64"),
-        "word_freq_3d": datasets.Value("float64"),
-        "word_freq_our": datasets.Value("float64"),
-        "word_freq_over": datasets.Value("float64"),
-        "word_freq_remove": datasets.Value("float64"),
-        "word_freq_internet": datasets.Value("float64"),
-        "word_freq_order": datasets.Value("float64"),
-        "word_freq_mail": datasets.Value("float64"),
-        "word_freq_receive": datasets.Value("float64"),
-        "word_freq_will": datasets.Value("float64"),
-        "word_freq_people": datasets.Value("float64"),
-        "word_freq_report": datasets.Value("float64"),
-        "word_freq_addresses": datasets.Value("float64"),
-        "word_freq_free": datasets.Value("float64"),
-        "word_freq_business": datasets.Value("float64"),
-        "word_freq_email": datasets.Value("float64"),
-        "word_freq_you": datasets.Value("float64"),
-        "word_freq_credit": datasets.Value("float64"),
-        "word_freq_your": datasets.Value("float64"),
-        "word_freq_font": datasets.Value("float64"),
-        "word_freq_000": datasets.Value("float64"),
-        "word_freq_money": datasets.Value("float64"),
-        "word_freq_hp": datasets.Value("float64"),
-        "word_freq_hpl": datasets.Value("float64"),
-        "word_freq_george": datasets.Value("float64"),
-        "word_freq_650": datasets.Value("float64"),
-        "word_freq_lab": datasets.Value("float64"),
-        "word_freq_labs": datasets.Value("float64"),
-        "word_freq_telnet": datasets.Value("float64"),
-        "word_freq_857": datasets.Value("float64"),
-        "word_freq_data": datasets.Value("float64"),
-        "word_freq_415": datasets.Value("float64"),
-        "word_freq_85": datasets.Value("float64"),
-        "word_freq_technology": datasets.Value("float64"),
-        "word_freq_1999": datasets.Value("float64"),
-        "word_freq_parts": datasets.Value("float64"),
-        "word_freq_pm": datasets.Value("float64"),
-        "word_freq_direct": datasets.Value("float64"),
-        "word_freq_cs": datasets.Value("float64"),
-        "word_freq_meeting": datasets.Value("float64"),
-        "word_freq_original": datasets.Value("float64"),
-        "word_freq_project": datasets.Value("float64"),
-        "word_freq_re": datasets.Value("float64"),
-        "word_freq_edu": datasets.Value("float64"),
-        "word_freq_table": datasets.Value("float64"),
-        "word_freq_conference": datasets.Value("float64"),
-        "char_freq_;": datasets.Value("float64"),
-        "char_freq_(": datasets.Value("float64"),
-        "char_freq_[": datasets.Value("float64"),
-        "char_freq_!": datasets.Value("float64"),
-        "char_freq_$": datasets.Value("float64"),
-        "char_freq_#": datasets.Value("float64"),
-        "capital_run_length_average": datasets.Value("float64"),
-        "capital_run_length_longest": datasets.Value("float64"),
-        "capital_run_length_total": datasets.Value("float64"),
-        "is_spam": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
-    },
-
-}
-features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
-
-
-class SpambaseConfig(datasets.BuilderConfig):
-    def __init__(self, **kwargs):
-        super(SpambaseConfig, self).__init__(version=VERSION, **kwargs)
-        self.features = features_per_config[kwargs["name"]]
-
-
-class Spambase(datasets.GeneratorBasedBuilder):
-    # dataset versions
-    DEFAULT_CONFIG = "spambase"
-    BUILDER_CONFIGS = [
-        SpambaseConfig(name="spambase",
-                       description="Spambase for binary classification.")
-    ]
-
-    def _info(self):
-        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
-                                    features=features_per_config[self.config.name])
-
-        return info
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        downloads = dl_manager.download_and_extract(urls_per_split)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
-        ]
-
-    def _generate_examples(self, filepath: str):
-        data = pandas.read_csv(filepath)
-
-        for row_id, row in data.iterrows():
-            data_row = dict(row)
-
-            yield row_id, data_row
 
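The deleted builder declared an explicit schema, including a `ClassLabel` for `is_spam`; with the plain CSV config those types are now inferred from the file. A hedged sketch of restoring the label feature after loading, reusing the names from the script above (the column name `is_spam` assumes the CSV header matches the old schema):

```python
from datasets import ClassLabel, load_dataset

ds = load_dataset("mstz/spambase", "spambase", split="train")

# The removed spambase.py defined is_spam as ClassLabel(names=("no", "yes")).
# CSV-backed configs load it as a plain integer column, so cast it back
# explicitly if ClassLabel semantics are needed downstream.
ds = ds.cast_column("is_spam", ClassLabel(num_classes=2, names=["no", "yes"]))

print(ds.features["is_spam"])
```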
spambase/train.csv ADDED
The diff for this file is too large to render. See raw diff
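The added `spambase/train.csv` is expected to carry the 57 numeric Spambase features plus the label that the deleted script enumerated. A quick sanity check with pandas, assuming the CSV has a header row matching those feature names:

```python
import pandas as pd

df = pd.read_csv("spambase/train.csv")

# 57 feature columns (word_freq_*, char_freq_*, capital_run_length_*)
# plus the is_spam label column.
print(df.shape)
print(df.columns[:5].tolist(), "...", df.columns[-1])
print(df["is_spam"].value_counts())
```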