Update script usage examples to use filename + `classify` command.

#6
by mit-ra - opened
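
For quick reference, this is the shape of the change applied to every usage example in the README (a minimal sketch: trailing flags such as `--output-dataset` are unchanged and omitted here, and the script is assumed to be invoked from the repository root):

```bash
# Old form: script addressed via its examples/ path, no subcommand
uv run examples/classify-dataset.py --input-dataset stanfordnlp/imdb --column text --labels "positive,negative"

# New form: script addressed by filename, with the explicit `classify` subcommand
uv run classify-dataset.py classify --input-dataset stanfordnlp/imdb --column text --labels "positive,negative"
```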
Files changed (1)
  1. README.md +18 -18
README.md CHANGED
@@ -22,7 +22,7 @@ This is a modified version of https://huggingface.co/datasets/uv-scripts/classif
 
 ```bash
 # Classify IMDB reviews
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset stanfordnlp/imdb \
   --column text \
   --labels "positive,negative" \
@@ -63,7 +63,7 @@ That's it! No installation, no setup - just `uv run`.
 ### Basic Classification
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset <dataset-id> \
   --column <text-column> \
   --labels <comma-separated-labels> \
@@ -98,7 +98,7 @@ uv run examples/classify-dataset.py \
 Provide context for your labels to improve classification accuracy:
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/support-tickets \
   --column content \
   --labels "bug,feature,question,other" \
@@ -114,7 +114,7 @@ The model uses these descriptions to better understand what each label represent
 Enable multi-label mode for documents that can have multiple applicable labels:
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset ag_news \
   --column text \
   --labels "world,sports,business,science" \
@@ -128,7 +128,7 @@ uv run examples/classify-dataset.py \
 ### Sentiment Analysis
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset stanfordnlp/imdb \
   --column text \
   --labels "positive,ambivalent,negative" \
@@ -139,7 +139,7 @@ uv run examples/classify-dataset.py \
 ### Support Ticket Classification
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/support-tickets \
   --column content \
   --labels "bug,feature_request,question,other" \
@@ -151,7 +151,7 @@ uv run examples/classify-dataset.py \
 ### News Categorization
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset ag_news \
   --column text \
   --labels "world,sports,business,tech" \
@@ -162,7 +162,7 @@ uv run examples/classify-dataset.py \
 ### Multi-Label News Classification
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset ag_news \
   --column text \
   --labels "world,sports,business,tech" \
@@ -180,7 +180,7 @@ Classify academic papers into machine learning research areas:
 
 ```bash
 # Fast classification with random sampling
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset librarian-bots/arxiv-metadata-snapshot \
   --column abstract \
   --labels "llm,computer_vision,reinforcement_learning,optimization,theory,other" \
@@ -192,7 +192,7 @@ uv run examples/classify-dataset.py \
   --shuffle
 
 # Multi-label for nuanced classification
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset librarian-bots/arxiv-metadata-snapshot \
   --column abstract \
   --labels "multimodal,agents,reasoning,safety,efficiency" \
@@ -212,7 +212,7 @@ This script is optimized to run locally on GPU-equipped machines:
 
 ```bash
 # Local execution with your GPU
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset stanfordnlp/imdb \
   --column text \
   --labels "positive,negative" \
@@ -230,7 +230,7 @@ When working with ordered datasets, use `--shuffle` with `--max-samples` to get
 
 ```bash
 # Get 50 random reviews instead of the first 50
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset stanfordnlp/imdb \
   --column text \
   --labels "positive,negative" \
@@ -248,7 +248,7 @@ By default, this script works with any instruction-tuned model. Here are some re
 
 ```bash
 # Lightweight model for fast classification
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/my-dataset \
   --column text \
   --labels "A,B,C" \
@@ -256,7 +256,7 @@ uv run examples/classify-dataset.py \
   --output-dataset user/classified
 
 # Larger model for complex classification
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/legal-docs \
   --column text \
   --labels "contract,patent,brief,memo,other" \
@@ -264,7 +264,7 @@ uv run examples/classify-dataset.py \
   --output-dataset user/legal-classified
 
 # Specialized zero-shot classifier
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/my-dataset \
   --column text \
   --labels "A,B,C" \
@@ -277,7 +277,7 @@ uv run examples/classify-dataset.py \
 Configure `--batch-size` for more effective batch processing with large datasets:
 
 ```bash
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset user/huge-dataset \
   --column text \
   --labels "A,B,C" \
@@ -342,7 +342,7 @@ Start with small tests, then run on the full dataset:
 
 ```bash
 # Step 1: Test with small sample
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset your-dataset \
   --column text \
   --labels "label1,label2,label3" \
@@ -351,7 +351,7 @@ uv run examples/classify-dataset.py \
   --max-samples 100
 
 # Step 2: If results look good, run on full dataset
-uv run examples/classify-dataset.py \
+uv run classify-dataset.py classify \
   --input-dataset your-dataset \
   --column text \
   --labels "label1,label2,label3" \