---
dataset_info:
  features:
  - name: word
    dtype: string
  - name: c
    dtype: int64
  splits:
  - name: train
    num_bytes: 29834987
    num_examples: 1497964
  download_size: 14221529
  dataset_size: 29834987
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
license: cc-by-sa-3.0
language:
- en
---

Word counts for the [English Wikipedia dump (2023-11-01)](https://huggingface.co/datasets/wikimedia/wikipedia), restricted to words that occur at least 10 times in the corpus. Created using the following script:

```python
import re
from collections import Counter

import duckdb
import pandas as pd
from datasets import load_dataset
from tqdm.auto import tqdm

# In-memory DuckDB; counts live only for the duration of the run.
conn = duckdb.connect(":memory:")


def ensure_db(conn: duckdb.DuckDBPyConnection):
    # Create the word-count table if it does not exist yet.
    conn.execute("""
        CREATE TABLE IF NOT EXISTS wc (
            word TEXT PRIMARY KEY,
            c BIGINT
        );
    """)


ensure_db(conn)


def merge_batch(conn: duckdb.DuckDBPyConnection, counts: Counter):
    if not counts:
        return
    df = pd.DataFrame({"word": list(counts.keys()), "c": list(map(int, counts.values()))})
    # Register the batch dataframe as a view, then MERGE (UPSERT)
    conn.register("batch_df", df)
    conn.execute("""
        MERGE INTO wc AS t
        USING batch_df AS s
        ON t.word = s.word
        WHEN MATCHED THEN UPDATE SET c = t.c + s.c
        WHEN NOT MATCHED THEN INSERT (word, c) VALUES (s.word, s.c);
    """)
    conn.unregister("batch_df")


TOKEN_RE = re.compile(r"[a-z]+(?:'[a-z]+)?")  # keep internal apostrophes


def tokenize_en_lower(text: str):
    if not text:
        return []
    return TOKEN_RE.findall(text.lower())


batch_size = 500  # articles to tokenize before flushing counts to DuckDB
limit = 0         # 0 = process the whole dump; set > 0 to stop early for testing

ds_iter = load_dataset("wikimedia/wikipedia", "20231101.en", split="train", streaming=True)

buf = Counter()
n = 0
pbar = tqdm(desc="Processing (streaming)", unit="art")
for ex in ds_iter:
    buf.update(tokenize_en_lower(ex.get("text", "")))
    n += 1
    if n % batch_size == 0:
        merge_batch(conn, buf)
        buf.clear()
        pbar.update(batch_size)
    if limit and n >= limit:
        break

# Flush whatever is left in the buffer after the last full batch.
if buf:
    merge_batch(conn, buf)
    pbar.update(n % batch_size)
pbar.close()
```
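
The script above only accumulates counts in the `wc` table; it does not show how the published split was produced. A minimal sketch of a possible export step, assuming the same `conn`, the stated minimum count of 10, and a hypothetical output path matching the card's `data/train-*` glob (the actual export command used is not part of the original script):

```python
# Assumption: keep only words seen at least 10 times and write them to Parquet,
# so `datasets` can pick the file up via the `data/train-*` glob in the config.
conn.execute("""
    COPY (
        SELECT word, c
        FROM wc
        WHERE c >= 10
        ORDER BY c DESC
    ) TO 'data/train-00000-of-00001.parquet' (FORMAT PARQUET);
""")
```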