Datasets:
Quentin Lhoest committed
Commit: 5c0afa8 · 1 parent: 53cd028
Release: 2.4.0
Commit from https://github.com/huggingface/datasets/commit/401d4c4f9b9594cb6527c599c0e7a72ce1a0ea49
- README.md +17 -8
- wmt_utils.py +22 -18
README.md
CHANGED
@@ -46,22 +46,31 @@ task_ids: []

### Dataset Summary

-
+Translation dataset based on the data from statmt.org.

-Versions
-sources. The base `
-your own data/language pair
+Versions exist for different years using a combination of data
+sources. The base `wmt` allows you to create a custom dataset by choosing
+your own data/language pair. This can be done as follows:

-```
-
-
+```python
+from datasets import inspect_dataset, load_dataset_builder
+
+inspect_dataset("wmt17", "path/to/scripts")
+builder = load_dataset_builder(
+    "path/to/scripts/wmt_utils.py",
    language_pair=("fr", "de"),
    subsets={
        datasets.Split.TRAIN: ["commoncrawl_frde"],
        datasets.Split.VALIDATION: ["euelections_dev2019"],
    },
)
-
+
+# Standard version
+builder.download_and_prepare()
+ds = builder.as_dataset()
+
+# Streamable version
+ds = builder.as_streaming_dataset()
```

### Supported Tasks and Leaderboards
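For orientation (this note is not part of the commit): once the builder from the snippet above has been prepared, the streamable version can be consumed lazily. A minimal sketch, assuming the same illustrative script path and subset names as in the card, and relying on the `translation` field that the WMT builders expose:

```python
from itertools import islice

# Stream the custom dataset built above without downloading everything first.
ds = builder.as_streaming_dataset()

# Peek at a few training examples; each one carries a "translation" dict
# mapping language codes to sentences, e.g. {"fr": "...", "de": "..."}.
for example in islice(ds["train"], 3):
    print(example["translation"]["fr"], "->", example["translation"]["de"])
```

The `as_dataset()` path behaves the same way, but materializes the splits on disk first via `download_and_prepare()`.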
wmt_utils.py
CHANGED
@@ -25,7 +25,6 @@ import itertools
import os
import re
import xml.etree.cElementTree as ElementTree
-from abc import ABC, abstractmethod

import datasets

@@ -34,22 +33,31 @@ logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
-
+Translation dataset based on the data from statmt.org.

-Versions
-sources. The base `
-your own data/language pair
+Versions exist for different years using a combination of data
+sources. The base `wmt` allows you to create a custom dataset by choosing
+your own data/language pair. This can be done as follows:

-```
-
-
+```python
+from datasets import inspect_dataset, load_dataset_builder
+
+inspect_dataset("wmt17", "path/to/scripts")
+builder = load_dataset_builder(
+    "path/to/scripts/wmt_utils.py",
    language_pair=("fr", "de"),
    subsets={
        datasets.Split.TRAIN: ["commoncrawl_frde"],
        datasets.Split.VALIDATION: ["euelections_dev2019"],
    },
)
-
+
+# Standard version
+builder.download_and_prepare()
+ds = builder.as_dataset()
+
+# Streamable version
+ds = builder.as_streaming_dataset()
```

"""
@@ -662,20 +670,15 @@ class WmtConfig(datasets.BuilderConfig):
# +++++++++++++++++++++


-class Wmt(ABC, datasets.GeneratorBasedBuilder):
+class Wmt(datasets.GeneratorBasedBuilder):
    """WMT translation dataset."""

+    BUILDER_CONFIG_CLASS = WmtConfig
+
    def __init__(self, *args, **kwargs):
-        if type(self) == Wmt and "config" not in kwargs:  # pylint: disable=unidiomatic-typecheck
-            raise ValueError(
-                "The raw `wmt_translate` can only be instantiated with the config "
-                "kwargs. You may want to use one of the `wmtYY_translate` "
-                "implementation instead to get the WMT dataset for a specific year."
-            )
        super(Wmt, self).__init__(*args, **kwargs)

    @property
-    @abstractmethod
    def _subsets(self):
        """Subsets that make up each split of the dataset."""
        raise NotImplementedError("This is a abstract method")
@@ -685,7 +688,8 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
        """Subsets that make up each split of the dataset for the language pair."""
        source, target = self.config.language_pair
        filtered_subsets = {}
-        for split, ss_names in self._subsets.items():
+        subsets = self._subsets if self.config.subsets is None else self.config.subsets
+        for split, ss_names in subsets.items():
            filtered_subsets[split] = []
            for ss_name in ss_names:
                dataset = DATASET_MAP[ss_name]
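For context on the `class Wmt` hunk above (again, not part of the commit): with the `abc` machinery removed and `BUILDER_CONFIG_CLASS = WmtConfig` set on the base class, year-specific builders are plain subclasses that supply their own `_subsets` defaults. A hypothetical sketch, with an illustrative class name, config values, and subset lists:

```python
import datasets

from wmt_utils import Wmt, WmtConfig  # assumes wmt_utils.py is importable locally


class Wmt19FrDe(Wmt):  # hypothetical subclass, for illustration only
    """Illustrative year-specific builder on top of the Wmt base class."""

    # Picked up through BUILDER_CONFIG_CLASS = WmtConfig on the base class;
    # the config name is derived from the language pair.
    BUILDER_CONFIGS = [
        WmtConfig(
            description="Illustrative French-German configuration.",
            version=datasets.Version("1.0.0"),
            language_pair=("fr", "de"),
        )
    ]

    @property
    def _subsets(self):
        # Default subsets per split; an explicit `subsets` mapping passed via
        # the config takes precedence (see the last hunk of this diff).
        return {
            datasets.Split.TRAIN: ["commoncrawl_frde"],
            datasets.Split.VALIDATION: ["euelections_dev2019"],
        }
```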
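One practical consequence of the last hunk: when the config carries no `subsets`, the builder falls back to its `_subsets` property, which on the base `Wmt` class still raises `NotImplementedError`. So the raw `wmt_utils.py` script effectively requires the `subsets` argument, while year-specific builders can omit it. A hedged sketch of both paths, reusing the illustrative script path from the card and a standard `wmt17` config name:

```python
import datasets
from datasets import load_dataset_builder

# Raw wmt_utils.py script: pass `subsets` explicitly; otherwise the fallback
# to Wmt._subsets raises NotImplementedError when the splits are resolved.
custom_builder = load_dataset_builder(
    "path/to/scripts/wmt_utils.py",
    language_pair=("fr", "de"),
    subsets={datasets.Split.TRAIN: ["commoncrawl_frde"]},
)

# Year-specific builder: its own _subsets defaults apply, so no `subsets`
# argument is needed here.
wmt17_builder = load_dataset_builder("wmt17", "de-en")
```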