Datasets:
				
			
			
	
			
	
		
			
	
		
		Why can't I pull this dataset? Can someone tell me how I should pull it?
How I tried to pull the dataset:
"from datasets import load_dataset
dataset = load_dataset("GETALP/flue", "CLS", trust_remote_code=True)    # ['CLS', 'PAWS-X', 'XNLI', 'WSD-V']
print(dataset)".
	
		
	
	
		And this is the ERROR I had:
"""
	
ReadError                                 Traceback (most recent call last)
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in _prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)
   1606                 _time = time.time()
-> 1607                 for key, record in generator:
   1608                     if max_shard_size is not None and writer._num_bytes > max_shard_size:
13 frames
~/.cache/huggingface/modules/datasets_modules/datasets/GETALP--flue/c049b264364575c7cae4c07b324e06151d104cb9674773476881db54bd603aa6/flue.py in _generate_examples(self, data_file, split, files)
    385         if self.config.name == "CLS":
--> 386             for path, f in files:
    387                 for category in ["books", "dvd", "music"]:
/usr/local/lib/python3.10/dist-packages/datasets/utils/track.py in iter(self)
     49     def iter(self):
---> 50         for x in self.generator(*self.args):
     51             self.last_item = x
/usr/local/lib/python3.10/dist-packages/datasets/utils/file_utils.py in _iter_from_urlpath(cls, urlpath, download_config)
   1341             else:
-> 1342                 yield from cls._iter_tar(f)
   1343 
/usr/local/lib/python3.10/dist-packages/datasets/utils/file_utils.py in _iter_tar(f)
   1292     def _iter_tar(f):
-> 1293         stream = tarfile.open(fileobj=f, mode="r|*")
   1294         for tarinfo in stream:
/usr/lib/python3.10/tarfile.py in open(cls, name, mode, fileobj, bufsize, **kwargs)
   1857             try:
-> 1858                 t = cls(name, filemode, stream, **kwargs)
   1859             except:
/usr/lib/python3.10/tarfile.py in init(self, name, mode, fileobj, format, tarinfo, dereference, ignore_zeros, encoding, errors, pax_headers, debug, errorlevel, copybufsize)
   1735                 self.firstmember = None
-> 1736                 self.firstmember = self.next()
   1737 
/usr/lib/python3.10/tarfile.py in next(self)
   2635                 elif self.offset == 0:
-> 2636                     raise ReadError(str(e)) from None
   2637             except EmptyHeaderError:
ReadError: invalid header
The above exception was the direct cause of the following exception:
DatasetGenerationError                    Traceback (most recent call last)
 in <cell line: 2>()
      1 from datasets import load_dataset
----> 2 dataset = load_dataset("GETALP/flue", "CLS", trust_remote_code=True)    # ['CLS', 'PAWS-X', 'XNLI', 'WSD-V']
      3 print(dataset)
/usr/local/lib/python3.10/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, keep_in_memory, save_infos, revision, token, streaming, num_proc, storage_options, trust_remote_code, **config_kwargs)
   2149
   2150     # Download and prepare data
-> 2151     builder_instance.download_and_prepare(
   2152         download_config=download_config,
   2153         download_mode=download_mode,
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, dl_manager, base_path, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
    922                     if num_proc is not None:
    923                         prepare_split_kwargs["num_proc"] = num_proc
--> 924                     self._download_and_prepare(
    925                         dl_manager=dl_manager,
    926                         verification_mode=verification_mode,
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs)
   1646
   1647     def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
-> 1648         super()._download_and_prepare(
   1649             dl_manager,
   1650             verification_mode,
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
    998             try:
    999                 # Prepare split will record examples associated to the split
-> 1000                 self._prepare_split(split_generator, **prepare_split_kwargs)
   1001             except OSError as e:
   1002                 raise OSError(
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in _prepare_split(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)
   1484             job_id = 0
   1485             with pbar:
-> 1486                 for job_id, done, content in self._prepare_split_single(
   1487                     gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
   1488                 ):
/usr/local/lib/python3.10/dist-packages/datasets/builder.py in _prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)
   1641             if isinstance(e, SchemaInferenceError) and e.context is not None:
   1642                 e = e.context
-> 1643             raise DatasetGenerationError("An error occurred while generating the dataset") from e
   1644
   1645         yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
DatasetGenerationError: An error occurred while generating the dataset
"""
Dear @Neroism8422 ,
I hope you are doing well despite the frustration that comes through in the tone of your message (it is usually common courtesy to say hello and please...).
Maybe it is related to the previous comment, which clearly states that the data the CLS task relies on is no longer openly available on Zenodo.
the original url is:             data_url="https://zenodo.org/record/3251672/files/cls-acl10-unprocessed.tar.gz",
(as taken from the python script).
The Zenodo record now clearly indicates a restriction: https://zenodo.org/record/3251672
I suppose you may have to download it manually after asking the original dataset author on Zenodo for authorization.
Regards,
Gilles,
Dear @Neroism8422 ,
I hope you are doing well despite the frustration that comes through in the tone of your message (it is usually common courtesy to say hello and please...).
Maybe it is related to the previous comment, which clearly states that the data the CLS task relies on is no longer openly available on Zenodo.
the original url is: data_url="https://zenodo.org/record/3251672/files/cls-acl10-unprocessed.tar.gz",
(as taken from the Python script). The Zenodo record now clearly indicates a restriction: https://zenodo.org/record/3251672
I suppose you may have to download it manually after asking the original dataset author on Zenodo for authorization.
Regards,
Gilles,
Sorry — I know you might be trying to remind me "how to ask a question"; I'm kind of in a hurry, sorry about that. And thank you for your answer — I've already got the dataset; I just needed it for a FLUE benchmark on my model. Huge thanks.
Hi @Neroism8422 ,
Can I have the dataset as well? I don't have permission to access the dataset through Zenodo either.
A help would be really appreciated ^^.
Thank you!
FLUE neither manages nor owns the CLS data.
You have to ask the data owner for permission through Zenodo. The data owner is the only person entitled to grant or refuse access.
 
						 
						