gsaltintas committed (verified)
Commit f386c0a · 1 parent: 0b2f7d3

Upload README.md with huggingface_hub

Files changed (1): README.md (+0 -306)
README.md CHANGED
@@ -7,312 +7,6 @@ pretty_name: Tokenization Robustness
 tags:
 - multilingual
 - tokenization
-dataset_info:
-- config_name: farsi_tokenizer_robustness_cannonical
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 12118
-    num_examples: 45
-  download_size: 10761
-  dataset_size: 12118
-- config_name: farsi_tokenizer_robustness_code_language_script_switching
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 10823
-    num_examples: 45
-  download_size: 9244
-  dataset_size: 10823
-- config_name: farsi_tokenizer_robustness_colloquial
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 9788
-    num_examples: 45
-  download_size: 9253
-  dataset_size: 9788
-- config_name: farsi_tokenizer_robustness_diacritics_presence_absence
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 12047
-    num_examples: 45
-  download_size: 10165
-  dataset_size: 12047
-- config_name: farsi_tokenizer_robustness_keyboard_proximity_errors
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 10835
-    num_examples: 45
-  download_size: 9455
-  dataset_size: 10835
-- config_name: farsi_tokenizer_robustness_romanization
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 8399
-    num_examples: 45
-  download_size: 8952
-  dataset_size: 8399
-- config_name: farsi_tokenizer_robustness_word_reordering
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 10883
-    num_examples: 45
-  download_size: 9566
-  dataset_size: 10883
-- config_name: farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space
-  features:
-  - name: question
-    dtype: string
-  - name: choices
-    list: string
-  - name: answer
-    dtype: int64
-  - name: answer_label
-    dtype: string
-  - name: split
-    dtype: string
-  - name: subcategories
-    dtype: string
-  - name: lang
-    dtype: string
-  - name: second_lang
-    dtype: string
-  - name: coding_lang
-    dtype: string
-  - name: notes
-    dtype: string
-  - name: id
-    dtype: string
-  - name: set_id
-    dtype: float64
-  - name: variation_id
-    dtype: float64
-  splits:
-  - name: test
-    num_bytes: 12666
-    num_examples: 45
-  download_size: 9993
-  dataset_size: 12666
-configs:
-- config_name: farsi_tokenizer_robustness_cannonical
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_cannonical/test-*
-- config_name: farsi_tokenizer_robustness_code_language_script_switching
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_code_language_script_switching/test-*
-- config_name: farsi_tokenizer_robustness_colloquial
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_colloquial/test-*
-- config_name: farsi_tokenizer_robustness_diacritics_presence_absence
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_diacritics_presence_absence/test-*
-- config_name: farsi_tokenizer_robustness_keyboard_proximity_errors
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_keyboard_proximity_errors/test-*
-- config_name: farsi_tokenizer_robustness_romanization
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_romanization/test-*
-- config_name: farsi_tokenizer_robustness_word_reordering
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_word_reordering/test-*
-- config_name: farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space
-  data_files:
-  - split: test
-    path: farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
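For reference, the removed `dataset_info`/`configs` blocks describe eight Farsi perturbation configs, each exposing a single 45-example `test` split with the fields listed under `features`. The sketch below shows how such a config could be loaded with the `datasets` library; the repository id is a hypothetical placeholder (only the username `gsaltintas` appears in this commit), and the config name is taken from the list above.

```python
from datasets import load_dataset

# Hypothetical repo id -- replace with the actual Hub path of this dataset.
REPO_ID = "gsaltintas/tokenization-robustness"

# Each config (e.g. "farsi_tokenizer_robustness_cannonical") has one
# 45-example "test" split according to the metadata removed above.
ds = load_dataset(REPO_ID, name="farsi_tokenizer_robustness_cannonical", split="test")

example = ds[0]
print(ds.features)        # question, choices, answer, answer_label, split, ...
print(example["question"])
print(example["choices"], example["answer_label"])
```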
 