Dataset: mteb/ · Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, pandas
AdnanElAssadi committed · verified · commit 010fedd · 1 parent: ec14b9c

Add BIRCO dataset with proper configs
README.md ADDED
@@ -0,0 +1,40 @@
+ ---
+ dataset_info:
+ - config_name: corpus
+   splits:
+   - name: test
+     num_bytes: 3769275
+     num_examples: 5544
+ - config_name: queries
+   splits:
+   - name: test
+     num_bytes: 37845
+     num_examples: 60
+ - config_name: default
+   splits:
+   - name: test
+     num_bytes: 63090
+     num_examples: 6634
+ configs:
+ - config_name: corpus
+   data_files:
+   - split: test
+     path: data/corpus/test-*.parquet
+ - config_name: queries
+   data_files:
+   - split: test
+     path: data/queries/test-*.parquet
+ - config_name: default
+   data_files:
+   - split: test
+     path: data/default/test-*.parquet
+ ---
+
+ # Dataset Card for BIRCO-DorisMae-Test
+
+ ## Dataset Configurations
+
+ - **Corpus**: Document collection (5544 documents)
+ - **Queries**: Search queries (60 queries)
+ - **Default**: Relevance judgments (6634 judgments)
+
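The three configs above map directly onto `load_dataset` config names. Below is a minimal sketch of how a consumer might load them; the full repository id is truncated on this page ("mteb/"), so the id in the snippet is a placeholder, not the actual name:

```python
from datasets import load_dataset

# Placeholder: the page only shows the "mteb/" prefix of the repository id,
# so substitute the real dataset name before running.
REPO_ID = "mteb/<dataset-name>"

corpus = load_dataset(REPO_ID, "corpus", split="test")    # 5544 documents: _id, title, text
queries = load_dataset(REPO_ID, "queries", split="test")  # 60 queries: _id, text
qrels = load_dataset(REPO_ID, "default", split="test")    # 6634 judgments: query-id, corpus-id, score

print(corpus[0]["_id"], queries[0]["text"], qrels[0]["score"])
```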
birco.py ADDED
@@ -0,0 +1,80 @@
+ import datasets
+ from datasets import DatasetInfo, Features, Value, Split, SplitGenerator
+
+ _CITATION = """@misc{birco,
+     title={{BIRCO: A Benchmark of Information Retrieval Tasks with Complex Objectives}},
+     author={{Xiaoyue Wang et al.}},
+     year={2024},
+     url={https://arxiv.org/abs/2402.14151},
+ }"""
+ _DESCRIPTION = """BIRCO benchmark containing corpus, queries, and relevance judgments"""
+ _HOMEPAGE = "https://github.com/BIRCO-benchmark/BIRCO"
+ _LICENSE = "CC-BY-4.0"
+
+
+ class BIRCO(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="corpus",
+             version=datasets.Version("1.0.0"),
+             description="Document corpus",
+         ),
+         datasets.BuilderConfig(
+             name="queries",
+             version=datasets.Version("1.0.0"),
+             description="Search queries",
+         ),
+         datasets.BuilderConfig(
+             name="default",
+             version=datasets.Version("1.0.0"),
+             description="Relevance judgments",
+         ),
+     ]
+
+     def _info(self):
+         # Each config exposes its own schema: documents, queries, or relevance judgments.
+         if self.config.name == "corpus":
+             features = Features({
+                 "_id": Value("string"),
+                 "text": Value("string"),
+                 "title": Value("string"),
+             })
+         elif self.config.name == "queries":
+             features = Features({
+                 "_id": Value("string"),
+                 "text": Value("string"),
+             })
+         elif self.config.name == "default":
+             features = Features({
+                 "query-id": Value("string"),
+                 "corpus-id": Value("string"),
+                 "score": Value("float64"),
+             })
+
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             citation=_CITATION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # The parquet files are stored in the repository under data/<config_name>/.
+         return [
+             SplitGenerator(
+                 name=Split.TEST,
+                 gen_kwargs={
+                     "files": dl_manager.download_and_extract({
+                         "data": f"data/{self.config.name}/test-*.parquet"
+                     }),
+                     "split": "test",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, files, split):
+         # Delegate parquet parsing to the packaged parquet builder and re-yield its rows.
+         dataset = datasets.load_dataset("parquet", data_files=files["data"], split=split)
+         for idx, example in enumerate(dataset):
+             yield idx, example
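For retrieval evaluation, the flat `default` config is usually folded into a nested qrels mapping. Here is a minimal sketch under the schema declared above; `build_qrels` is an illustrative helper, not part of birco.py, and it assumes the LFS-backed parquet files (next sections) have actually been fetched:

```python
from collections import defaultdict
from datasets import load_dataset

def build_qrels(relevance_rows):
    # Fold flat (query-id, corpus-id, score) rows into {query-id: {corpus-id: score}}.
    qrels = defaultdict(dict)
    for row in relevance_rows:
        qrels[row["query-id"]][row["corpus-id"]] = row["score"]
    return dict(qrels)

# Read the committed parquet file directly; the packaged parquet builder
# exposes plain data_files under a single "train" split.
relevance = load_dataset(
    "parquet",
    data_files="data/default/test-00000.parquet",
    split="train",
)
qrels = build_qrels(relevance)
print(len(qrels), "queries with judgments")  # at most the 60 queries in the queries config
```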
data/corpus/test-00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6ac4461a42b43c7237e70caf428af0b874a78c02364db82bd27a970b626c04b
+ size 3769275
data/default/test-00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3b04f51a097f74d1aa3dffa70eb9643d25a2ee2c104116a44e729efa121ac9e
+ size 63090
data/queries/test-00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aca3680890b1c2ff15db90bc25ba99ceb661086b3a0269421c056c4a723c72e0
+ size 37845
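The three files above are Git LFS pointers rather than the parquet payloads themselves; the data has to be fetched (e.g. with `git lfs pull`) before it can be read. Once fetched, the files can be inspected with pandas (listed under Libraries); a minimal sketch, with expected counts taken from the README:

```python
import pandas as pd

# Assumes the LFS payloads have been pulled; otherwise these paths contain
# only the three-line pointer text shown above.
corpus = pd.read_parquet("data/corpus/test-00000.parquet")
queries = pd.read_parquet("data/queries/test-00000.parquet")
qrels = pd.read_parquet("data/default/test-00000.parquet")

print(len(corpus), len(queries), len(qrels))  # expected: 5544 60 6634
print(list(qrels.columns))                    # expected: ['query-id', 'corpus-id', 'score']
```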
dataset_infos.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "default": {
+     "dataset_size": 3870210,
+     "download_size": 3870210,
+     "features": {
+       "corpus": {
+         "_id": {
+           "dtype": "string",
+           "_type": "Value"
+         },
+         "text": {
+           "dtype": "string",
+           "_type": "Value"
+         },
+         "title": {
+           "dtype": "string",
+           "_type": "Value"
+         }
+       },
+       "queries": {
+         "_id": {
+           "dtype": "string",
+           "_type": "Value"
+         },
+         "text": {
+           "dtype": "string",
+           "_type": "Value"
+         }
+       },
+       "default": {
+         "query-id": {
+           "dtype": "string",
+           "_type": "Value"
+         },
+         "corpus-id": {
+           "dtype": "string",
+           "_type": "Value"
+         },
+         "score": {
+           "dtype": "float64",
+           "_type": "Value"
+         }
+       }
+     },
+     "splits": {
+       "test": {
+         "name": "test",
+         "num_bytes": 3870210,
+         "num_examples": 6634
+       }
+     }
+   }
+ }
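The byte counts are internally consistent: the per-config `num_bytes` values in README.md (3769275 + 37845 + 63090) sum to the 3870210 reported here as `dataset_size`, `download_size`, and the test split's `num_bytes`, and they match the `size` fields of the LFS pointers. A small consistency check, assuming an un-pulled checkout where the parquet paths still contain the pointer text:

```python
import re
from pathlib import Path

# Declared sizes from README.md / dataset_infos.json, keyed by pointer path.
EXPECTED = {
    "data/corpus/test-00000.parquet": 3769275,
    "data/queries/test-00000.parquet": 37845,
    "data/default/test-00000.parquet": 63090,
}

total = 0
for path, declared in EXPECTED.items():
    # A Git LFS pointer is three lines: version, oid, and "size <bytes>".
    pointer = Path(path).read_text()
    size = int(re.search(r"^size (\d+)$", pointer, re.MULTILINE).group(1))
    assert size == declared, (path, size, declared)
    total += size

assert total == 3870210  # dataset_size / download_size in dataset_infos.json
print("all pointer sizes match:", total)
```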