Datasets:
analysis and generation of metadata in commit 15fccf29
- notebooks/lilabc_CT.ipynb +0 -0
- notebooks/lilabc_CT.py +169 -365

notebooks/lilabc_CT.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
notebooks/lilabc_CT.py
CHANGED
|
@@ -20,7 +20,7 @@ import seaborn as sns
|
|
| 20 |
sns.set_style("whitegrid")
|
| 21 |
|
| 22 |
# %%
|
| 23 |
-
df = pd.read_csv("../data/lila_image_urls_and_labels.csv")
|
| 24 |
df.head()
|
| 25 |
|
| 26 |
# %%
|
|
@@ -31,541 +31,345 @@ df.annotation_level.value_counts()
|
|
| 31 |
|
| 32 |
# %% [markdown]
|
| 33 |
# Annotation level indicates image vs. sequence (or unknown); it is not analogous to `taxonomy_level` from lila-taxonomy-mapping_release.csv. It seems `original_label` may be the analogous column.
|
| 34 |
|
| 35 |
# %%
|
| 36 |
-
df.
|
| 37 |
-
|
| 38 |
-
# %%
|
| 39 |
-
df.info(show_counts = True)
|
| 40 |
|
| 41 |
# %%
|
| 42 |
-
df.
|
| 43 |
|
| 44 |
# %% [markdown]
|
| 45 |
-
#
|
| 46 |
|
| 47 |
# %%
|
| 48 |
-
|
| 49 |
-
df.loc[df.species == "homo sapien"]
|
| 50 |
|
| 51 |
# %% [markdown]
|
| 52 |
-
#
|
| 53 |
|
| 54 |
# %%
|
| 55 |
-
|
| 56 |
|
| 57 |
# %% [markdown]
|
| 58 |
-
#
|
| 59 |
#
|
| 60 |
-
#
|
| 61 |
-
|
| 62 |
-
#
|
| 63 |
-
df_cleaned[['genus', 'species']].info(show_counts = True)
|
| 64 |
-
|
| 65 |
-
# %%
|
| 66 |
-
df_cleaned[['genus', 'species']].nunique()
|
| 67 |
-
|
| 68 |
-
# %%
|
| 69 |
-
df_cleaned.loc[df_cleaned['genus'].isna(), 'species'].value_counts()
|
| 70 |
-
|
| 71 |
-
# %% [markdown]
|
| 72 |
-
# All entries missing `genus` are also missing `species`, so we'll drop all entries with null `genus`.
|
| 73 |
-
|
| 74 |
-
# %%
|
| 75 |
-
df_genusSpecies = df_cleaned.dropna(subset = "genus")
|
| 76 |
-
|
| 77 |
-
# %%
|
| 78 |
-
df_genusSpecies.info(show_counts = True)
|
| 79 |
|
| 80 |
# %%
|
| 81 |
-
|
| 82 |
|
| 83 |
# %% [markdown]
|
| 84 |
-
#
|
| 85 |
#
|
| 86 |
-
#
|
| 87 |
|
| 88 |
# %%
|
| 89 |
-
|
|
|
|
| 90 |
|
| 91 |
# %% [markdown]
|
| 92 |
-
#
|
| 93 |
-
|
| 94 |
-
# %%
|
| 95 |
-
df_genusSpecies.duplicated(subset = ['url'], keep = 'first').value_counts()
|
| 96 |
|
| 97 |
# %%
|
| 98 |
-
|
| 99 |
|
| 100 |
# %% [markdown]
|
| 101 |
-
#
|
| 102 |
-
|
| 103 |
-
# %%
|
| 104 |
-
df_genusSpecies['url_dupe'] = df_genusSpecies.duplicated(subset = ['url'], keep = False)
|
| 105 |
-
|
| 106 |
-
# %%
|
| 107 |
-
df_genusSpecies.sample(10)
|
| 108 |
|
| 109 |
# %%
|
| 110 |
-
|
| 111 |
-
duplicated_urls.head(10)
|
| 112 |
|
| 113 |
# %% [markdown]
|
| 114 |
-
#
|
| 115 |
-
|
| 116 |
-
# %% [markdown]
|
| 117 |
-
# We have 1,046,985 distinct sequence IDs for the 6,901,819 unique image IDs, suggesting an average of about 6.6 images per sequence?
|
| 118 |
|
| 119 |
# %%
|
| 120 |
-
|
| 121 |
|
| 122 |
# %% [markdown]
|
| 123 |
-
#
|
| 124 |
#
|
| 125 |
-
#
|
| 126 |
|
| 127 |
# %%
|
| 128 |
-
|
| 129 |
-
dedupe_genusSpecies.head()
|
| 130 |
-
|
| 131 |
-
# %% [markdown]
|
| 132 |
-
# Let's quickly check our stats on this subset (e.g., `species`/`genus` values).
|
| 133 |
|
| 134 |
# %%
|
| 135 |
-
|
| 136 |
|
| 137 |
# %%
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
dedupe_genusSpecies[cols].nunique()
|
| 142 |
|
| 143 |
# %% [markdown]
|
| 144 |
-
#
|
| 145 |
#
|
| 146 |
-
#
|
| 147 |
-
|
| 148 |
-
#
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
#
|
| 153 |
-
#
|
|
|
|
| 154 |
|
| 155 |
# %%
|
| 156 |
-
|
| 157 |
|
| 158 |
-
|
| 159 |
-
dedupe_species[cols].nunique()
|
| 160 |
|
| 161 |
# %% [markdown]
|
| 162 |
-
# We
|
| 163 |
|
| 164 |
# %%
|
| 165 |
-
|
| 166 |
-
dedupe_species['url'].head().values
|
| 167 |
-
|
| 168 |
-
# %% [markdown]
|
| 169 |
-
# ## Save a Species Label CSV
|
| 170 |
-
#
|
| 171 |
-
# Dataset with all images that have labels down to the species level. We will get some stats on this to determine how to truncate to just one instance of each animal per sequence.
|
| 172 |
|
| 173 |
# %%
|
| 174 |
-
|
| 175 |
|
| 176 |
# %% [markdown]
|
| 177 |
-
#
|
| 178 |
-
#
|
| 179 |
-
# Let's get some statistics on this data to help narrow it down:
|
| 180 |
-
# - Do we have instances of common name matching scientific name? There are more unique instances of `common_name` than `species` or `scientific_name`.
|
| 181 |
-
# - What is the minimum number of instances of a particular species? We'll balance the dataset to have the smallest number available (assuming 20+ images).
|
| 182 |
-
# - For later evaluation, what's the distribution of time of day (will be more meaningful for the finalized species dataset)?
|
| 183 |
|
| 184 |
# %%
|
| 185 |
-
|
| 186 |
|
| 187 |
# %%
|
| 188 |
-
|
| 189 |
|
| 190 |
-
# %%
|
| 191 |
-
|
| 192 |
|
| 193 |
# %%
|
| 194 |
-
|
| 195 |
|
| 196 |
# %% [markdown]
|
| 197 |
-
#
|
| 198 |
#
|
| 199 |
-
#
|
| 200 |
-
|
| 201 |
-
# %%
|
| 202 |
-
num_species_dict = {}
|
| 203 |
-
for species in dedupe_species.species.unique():
|
| 204 |
-
num_species = len(dedupe_species.loc[dedupe_species.species == species])
|
| 205 |
-
num_species_dict[species] = num_species
|
| 206 |
-
num_species_dict
|
| 207 |
|
| 208 |
# %%
|
| 209 |
-
|
| 210 |
-
for species, num in num_species_dict.items():
|
| 211 |
-
dedupe_species.loc[dedupe_species.species == species, 'num_species'] = num
|
| 212 |
-
|
| 213 |
-
# %%
|
| 214 |
-
more_samples = {}
|
| 215 |
-
for species, num in num_species_dict.items():
|
| 216 |
-
if num >= 40:
|
| 217 |
-
more_samples[species] = num
|
| 218 |
-
|
| 219 |
-
more_samples
|
| 220 |
-
|
| 221 |
-
# %%
|
| 222 |
-
print("We have ", len(more_samples), " species for which there are at least 40 images.")
|
| 223 |
-
|
| 224 |
-
# %%
|
| 225 |
-
mid_samples = {}
|
| 226 |
-
for species, num in num_species_dict.items():
|
| 227 |
-
if num >= 20:
|
| 228 |
-
mid_samples[species] = num
|
| 229 |
-
|
| 230 |
-
print("We have ", len(mid_samples), " species for which there are at least 20 images.")
|
| 231 |
|
| 232 |
# %% [markdown]
|
| 233 |
-
#
|
| 234 |
#
|
| 235 |
-
#
|
| 236 |
|
| 237 |
# %%
|
| 238 |
-
|
| 239 |
|
| 240 |
-
|
| 241 |
-
dedupe_species.head()
|
| 242 |
-
|
| 243 |
-
# %%
|
| 244 |
-
dedupe_species['multi-image'].value_counts()
|
| 245 |
-
|
| 246 |
-
# %%
|
| 247 |
-
species_single_seq = dedupe_species.loc[~dedupe_species['multi-image']]
|
| 248 |
-
species_single_seq.sample(7)
|
| 249 |
-
|
| 250 |
-
# %%
|
| 251 |
-
species_single_seq.to_csv("../data/lila_image_urls_and_labels_SingleSpecies.csv", index = False)
|
| 252 |
-
|
| 253 |
-
# %%
|
| 254 |
-
species_single_seq[cols].nunique()
|
| 255 |
|
| 256 |
# %% [markdown]
|
| 257 |
-
#
|
| 258 |
-
|
| 259 |
-
# %% [markdown]
|
| 260 |
-
# #### Check Number of Images per Species
|
| 261 |
-
|
| 262 |
-
# %%
|
| 263 |
-
num_singleSpecies_dict = {}
|
| 264 |
-
for species in species_single_seq.species.unique():
|
| 265 |
-
num_species = len(species_single_seq.loc[species_single_seq.species == species])
|
| 266 |
-
num_singleSpecies_dict[species] = num_species
|
| 267 |
-
num_singleSpecies_dict
|
| 268 |
-
|
| 269 |
-
# %%
|
| 270 |
-
more_singleSamples = {}
|
| 271 |
-
for species, num in num_singleSpecies_dict.items():
|
| 272 |
-
if num >= 40:
|
| 273 |
-
more_singleSamples[species] = num
|
| 274 |
-
|
| 275 |
-
print("We have ", len(more_singleSamples), " species for which there are at least 40 images.")
|
| 276 |
-
print()
|
| 277 |
-
|
| 278 |
-
mid_singleSamples = {}
|
| 279 |
-
for species, num in num_singleSpecies_dict.items():
|
| 280 |
-
if num >= 20:
|
| 281 |
-
mid_singleSamples[species] = num
|
| 282 |
-
|
| 283 |
-
print("We have ", len(mid_singleSamples), " species for which there are at least 20 images.")
|
| 284 |
|
| 285 |
# %%
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
|
| 303 |
# %% [markdown]
|
| 304 |
-
#
|
| 305 |
|
| 306 |
# %%
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
sns.set_style("whitegrid")
|
| 310 |
-
sns.set(rc = {'figure.figsize': (10,10)})
|
| 311 |
-
|
| 312 |
-
# %%
|
| 313 |
-
sns.histplot(species_single_seq.loc[species_single_seq.num_singleSpecies >= 40], y = 'family')
|
| 314 |
-
|
| 315 |
-
# %%
|
| 316 |
-
sns.histplot(species_single_seq.loc[species_single_seq.num_singleSpecies >= 20], y = 'family')
|
| 317 |
|
| 318 |
# %% [markdown]
|
| 319 |
-
#
|
| 320 |
-
# This can occur when images are labeled as a sequence, and it is suggested that this is not an unusual occurrence. We will use the [MegaDetector results](https://lila.science/megadetector-results-for-camera-trap-datasets/) provided by LILA BC to run a check for these. Ultimately, we won't use repeated instances of the same animal in the same sequence, so the process of removing extra instances may also alleviate this issue. Additionally, it's worth noting that MegaDetector is trained to err on the side of detection (i.e., it's more likely to see an animal that's not there), so it is not likely to change our assessment.
|
| 321 |
#
|
| 322 |
-
# ####
|
| 323 |
-
|
| 324 |
-
# %% [markdown]
|
| 325 |
-
# Look up a couple of sample file values in the model results JSON (with and without detection).
|
| 326 |
|
| 327 |
# %%
|
| 328 |
-
|
| 329 |
|
| 330 |
-
# %%
|
| 331 |
-
|
| 332 |
|
| 333 |
# %%
|
| 334 |
-
|
| 335 |
|
| 336 |
-
# %%
|
| 337 |
-
|
| 338 |
|
| 339 |
# %%
|
| 340 |
-
|
| 341 |
|
| 342 |
# %% [markdown]
|
| 343 |
-
#
|
| 344 |
|
| 345 |
# %%
|
| 346 |
-
|
| 347 |
-
"Channel Islands Camera Traps": "channel-islands-camera-traps_mdv5b.0.0_results.json",
|
| 348 |
-
"ENA24": "ena24_mdv5b.0.0_results.json",
|
| 349 |
-
"Idaho Camera Traps": "idaho-camera-traps_mdv5b.0.0_results.json",
|
| 350 |
-
"Island Conservation Camera Traps": "island-conservation-camera-traps_mdv5b.0.0_results.json",
|
| 351 |
-
"Missouri Camera Traps": "missouri-camera-traps_mdv5b.0.0_results.json",
|
| 352 |
-
"Orinoquia Camera Traps": "orinoquia-camera-traps_public_mdv5b.0.0_results.json",
|
| 353 |
-
"Snapshot Camdeboo": "snapshot-safari_CDB_mdv5b.0.0_results.json",
|
| 354 |
-
"Snapshot Enonkishu": "snapshot-safari_ENO_mdv5b.0.0_results.json",
|
| 355 |
-
"Snapshot Karoo": "snapshot-safari_KAR_mdv5b.0.0_results.json",
|
| 356 |
-
"Snapshot Kgalagadi": "snapshot-safari_KGA_mdv5b.0.0_results.json",
|
| 357 |
-
"Snapshot Kruger": "snapshot-safari_KRU_mdv5b.0.0_results.json",
|
| 358 |
-
"Snapshot Mountain Zebra": "snapshot-safari_MTZ_mdv5b.0.0_results.json",
|
| 359 |
-
"SWG Camera Traps": "swg-camera-traps_public_mdv5b.0.0_results.json",
|
| 360 |
-
"WCS Camera Traps": "wcs-camera-traps_animals_mdv5b.0.0_results.json",
|
| 361 |
-
"Wellington Camera Traps": "wellington-camera-traps_images_mdv5b.0.0_results.json"}
|
| 362 |
-
|
| 363 |
|
| 364 |
# %%
|
| 365 |
-
|
| 366 |
-
with open("../MegaDetector_results/" + filename) as file:
|
| 367 |
-
data = json.load(file)
|
| 368 |
-
df_mdv5b = pd.json_normalize(data["images"], max_level = 1)
|
| 369 |
-
print(df_mdv5b.head())
|
| 370 |
-
dedupe_url = list(dedupe_species.loc[dedupe_species["dataset_name"] == dataset_name, 'url'])
|
| 371 |
-
dedupe_url_empties = []
|
| 372 |
-
for file in list(df_mdv5b.loc[(df_mdv5b['max_detection_conf'] <= 80) & (df_mdv5b['detections'].astype(str) != '[]'), 'file']):
|
| 373 |
-
if file in dedupe_url:
|
| 374 |
-
dedupe_url_empties.append(file)
|
| 375 |
-
print(dataset_name, ": ", dedupe_url_empties)
|
| 376 |
-
return dedupe_url_empties
|
| 377 |
-
|
| 378 |
|
| 379 |
# %% [markdown]
|
| 380 |
-
#
|
| 381 |
|
| 382 |
# %%
|
| 383 |
-
|
| 384 |
-
|
| 385 |
|
| 386 |
-
# %%
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
# %%
|
| 390 |
-
cct_mdv5a = pd.json_normalize(data["images"], max_level = 1)
|
| 391 |
|
| 392 |
# %%
|
| 393 |
-
|
| 394 |
|
| 395 |
# %% [markdown]
|
| 396 |
-
#
|
| 397 |
|
| 398 |
# %%
|
| 399 |
-
|
| 400 |
|
| 401 |
-
# %%
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
# %%
|
| 405 |
-
cct_mdv5a.loc[cct_mdv5a['detections'].astype(str) == '[]']
|
| 406 |
|
| 407 |
# %%
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
for file in list(cct_mdv5a.loc[cct_mdv5a['detections'].astype(str) == '[]', 'file']):
|
| 411 |
-
if file in dedupe_url_cct:
|
| 412 |
-
dedupe_cct_empties.append(file)
|
| 413 |
-
|
| 414 |
-
dedupe_cct_empties
|
| 415 |
-
|
| 416 |
-
# %% [markdown]
|
| 417 |
-
# CCT seems fine.
|
| 418 |
|
| 419 |
# %% [markdown]
|
| 420 |
-
#
|
| 421 |
-
# Since these have a note that empties may be labeled as species within the sequence, let's check this as well.
|
| 422 |
|
| 423 |
# %%
|
| 424 |
-
|
| 425 |
-
data = json.load(file)
|
| 426 |
|
| 427 |
-
|
| 428 |
-
|
| 429 |
|
| 430 |
# %% [markdown]
|
| 431 |
-
#
|
| 432 |
|
| 433 |
# %%
|
| 434 |
-
|
| 435 |
|
| 436 |
# %%
|
| 437 |
-
|
| 438 |
|
| 439 |
# %%
|
| 440 |
-
|
| 441 |
-
|
| 442 |
-
# %%
|
| 443 |
-
dedupe_url_wcs = list(dedupe_species.loc[dedupe_species['dataset_name'] == "WCS Camera Traps", 'url'])
|
| 444 |
-
dedupe_wcs_empties = []
|
| 445 |
-
for file in list(wcs_mdv5a.loc[wcs_mdv5a['detections'].astype(str) == '[]', 'file']):
|
| 446 |
-
if file in dedupe_url_wcs:
|
| 447 |
-
dedupe_wcs_empties.append(file)
|
| 448 |
-
|
| 449 |
-
dedupe_wcs_empties
|
| 450 |
|
| 451 |
# %% [markdown]
|
| 452 |
-
#
|
| 453 |
|
| 454 |
# %%
|
| 455 |
-
|
| 456 |
-
data = json.load(file)
|
| 457 |
|
| 458 |
# %%
|
| 459 |
-
|
| 460 |
-
|
| 461 |
|
| 462 |
# %%
|
| 463 |
-
|
| 464 |
-
for file in list(wcs_mdv5b.loc[wcs_mdv5b['detections'].astype(str) == '[]', 'file']):
|
| 465 |
-
if file in dedupe_url_wcs:
|
| 466 |
-
dedupe_wcs_empties.append(file)
|
| 467 |
-
|
| 468 |
-
dedupe_wcs_empties
|
| 469 |
|
| 470 |
# %% [markdown]
|
| 471 |
-
#
|
| 472 |
|
| 473 |
# %%
|
| 474 |
-
|
| 475 |
-
data = json.load(file)
|
| 476 |
|
| 477 |
# %%
|
| 478 |
-
|
| 479 |
-
nacti_mdv5b.head()
|
| 480 |
|
| 481 |
# %%
|
| 482 |
-
|
| 483 |
-
dedupe_nacti_empties = []
|
| 484 |
-
for file in list(nacti_mdv5b.loc[nacti_mdv5b['detections'].astype(str) == '[]', 'file']):
|
| 485 |
-
if file in dedupe_url_nacti:
|
| 486 |
-
dedupe_nacti_empties.append(file)
|
| 487 |
|
| 488 |
-
|
| 489 |
|
| 490 |
|
| 491 |
# %% [markdown]
|
| 492 |
-
#
|
| 493 |
-
#
|
| 494 |
-
# Let's automate the process to speed this up with a function that takes the dataset name and filename and returns any images marked empty by the model.
|
| 495 |
|
| 496 |
# %%
|
| 497 |
-
|
| 498 |
-
with open("../MegaDetector_results/" + filename) as file:
|
| 499 |
-
data = json.load(file)
|
| 500 |
-
df_mdv5b = pd.json_normalize(data["images"], max_level = 1)
|
| 501 |
-
print(df_mdv5b.head())
|
| 502 |
-
dedupe_url = list(dedupe_species.loc[dedupe_species["dataset_name"] == dataset_name, 'url'])
|
| 503 |
-
dedupe_url_empties = []
|
| 504 |
-
for file in list(df_mdv5b.loc[df_mdv5b['detections'].astype(str) == '[]', 'file']):
|
| 505 |
-
if file in dedupe_url:
|
| 506 |
-
dedupe_url_empties.append(file)
|
| 507 |
-
print(dataset_name, ": ", dedupe_url_empties)
|
| 508 |
-
return dedupe_url_empties
|
| 509 |
|
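The `def` line of this removed helper falls outside the diff context. A hedged reconstruction of the full cell, with the signature inferred from the later call `check_md_results(key, mdv5b_files[key])` and the body taken from the hunk above:

```python
import json
import pandas as pd

def check_md_results(dataset_name, filename):
    """Return URLs from `dataset_name` whose MegaDetector results have no detections."""
    # Relies on the notebook-global `dedupe_species` dataframe, as the original cell did.
    with open("../MegaDetector_results/" + filename) as f:
        data = json.load(f)
    df_mdv5b = pd.json_normalize(data["images"], max_level = 1)
    # A set makes each membership test O(1) instead of O(n).
    dedupe_url = set(dedupe_species.loc[dedupe_species["dataset_name"] == dataset_name, "url"])
    dedupe_url_empties = [fname for fname in df_mdv5b.loc[df_mdv5b["detections"].astype(str) == "[]", "file"]
                          if fname in dedupe_url]
    print(dataset_name, ": ", dedupe_url_empties)
    return dedupe_url_empties
```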
| 510 |
|
| 511 |
# %%
|
| 512 |
-
|
| 513 |
-
"ENA24": "ena24_mdv5b.0.0_results.json",
|
| 514 |
-
"Idaho Camera Traps": "idaho-camera-traps_mdv5b.0.0_results.json",
|
| 515 |
-
"Island Conservation Camera Traps": "island-conservation-camera-traps_mdv5b.0.0_results.json",
|
| 516 |
-
"Missouri Camera Traps": "missouri-camera-traps_mdv5b.0.0_results.json",
|
| 517 |
-
"Orinoquia Camera Traps": "orinoquia-camera-traps_public_mdv5b.0.0_results.json",
|
| 518 |
-
"Snapshot Camdeboo": "snapshot-safari_CDB_mdv5b.0.0_results.json",
|
| 519 |
-
"Snapshot Enonkishu": "snapshot-safari_ENO_mdv5b.0.0_results.json",
|
| 520 |
-
"Snapshot Karoo": "snapshot-safari_KAR_mdv5b.0.0_results.json",
|
| 521 |
-
"Snapshot Kgalagadi": "snapshot-safari_KGA_mdv5b.0.0_results.json",
|
| 522 |
-
"Snapshot Kruger": "snapshot-safari_KRU_mdv5b.0.0_results.json",
|
| 523 |
-
"Snapshot Mountain Zebra": "snapshot-safari_MTZ_mdv5b.0.0_results.json",
|
| 524 |
-
"SWG Camera Traps": "swg-camera-traps_public_mdv5b.0.0_results.json",
|
| 525 |
-
"Wellington Camera Traps": "wellington-camera-traps_images_mdv5b.0.0_results.json"}
|
| 526 |
-
|
| 527 |
|
| 528 |
# %%
|
| 529 |
-
|
| 530 |
-
for key in list(mdv5b_files.keys()):
|
| 531 |
-
empties[key] = check_md_results(key, mdv5b_files[key])
|
| 532 |
|
| 533 |
# %%
|
| 534 |
-
|
| 535 |
|
| 536 |
-
|
| 537 |
-
|
| 538 |
-
#
|
| 539 |
-
# Snapshot Serengeti was evaluated with MegaDetector v4 due to questions raised by [this issue](https://github.com/ultralytics/yolov5/issues/9294) as noted on [LILA BC's site](https://lila.science/megadetector-results-for-camera-trap-datasets/).
|
| 540 |
|
| 541 |
# %%
|
| 542 |
-
|
| 543 |
-
mdv4_files = ["snapshot-serengeti-mdv4.1.0_results.json/snapshot-serengeti_S" + str(i) + "_mdv4.1.0_results.json" for i in range(1,11)]
|
| 544 |
-
mdv4_files.append("snapshot-serengeti-mdv4.1.0_results.json/snapshot-serengeti_SER_S11_mdv4.1.0_results.json")
|
| 545 |
|
| 546 |
# %%
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
|
| 550 |
|
| 551 |
# %%
|
| 552 |
-
|
| 553 |
-
for file in mdv4_files[4:]:
|
| 554 |
-
mdv4_empties[file] = check_md_results("Snapshot Serengeti", file)
|
| 555 |
|
| 556 |
# %% [markdown]
|
| 557 |
-
#
|
| 558 |
|
| 559 |
# %%
|
| 560 |
-
with open("../MegaDetector_results/snapshot-serengeti-mdv4.1.0_results.json/snapshot-serengeti_S1_mdv4.1.0_results.json") as file:
|
| 561 |
-
data = json.load(file)
|
| 562 |
|
| 563 |
-
|
| 564 |
|
| 565 |
# %%
|
| 566 |
-
|
| 567 |
|
| 568 |
# %%
|
| 569 |
-
|
| 570 |
|
| 571 |
# %%
|
| 20 |
sns.set_style("whitegrid")
|
| 21 |
|
| 22 |
# %%
|
| 23 |
+
df = pd.read_csv("../data/lila_image_urls_and_labels.csv", low_memory = False)
|
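`low_memory = False` makes pandas read the whole file before inferring column dtypes, which avoids the mixed-dtype `DtypeWarning` this wide CSV otherwise triggers. A more memory-friendly sketch pins dtypes explicitly (the column names in the `dtype` map are illustrative, not confirmed offenders):

```python
# Pin known-mixed columns instead of disabling chunked dtype inference.
df = pd.read_csv("../data/lila_image_urls_and_labels.csv",
                 dtype = {"species": "string", "genus": "string"})  # hypothetical dtype map
```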
| 24 |
df.head()
|
| 25 |
|
| 26 |
# %%
|
|
|
|
| 31 |
|
| 32 |
# %% [markdown]
|
| 33 |
# Annotation level indicates image vs. sequence (or unknown); it is not analogous to `taxonomy_level` from lila-taxonomy-mapping_release.csv. It seems `original_label` may be the analogous column.
|
| 34 |
+
#
|
| 35 |
+
# We'll likely want to pull out the image-level annotations before doing any sequence checks and such, since those should be "clean" images. Though we will want to label them with how many distinct species are in each image first.
|
| 36 |
+
#
|
| 37 |
+
# We now have 66 fewer sequence-level annotations and 2,517,374 more image-level ones! That's quite the update! The unknown count has not changed.
|
| 38 |
+
#
|
| 39 |
+
# ### Check Dataset Counts
|
| 40 |
+
#
|
| 41 |
+
# 1. Make sure we have all datasets expected.
|
| 42 |
+
# 2. Check which/how many datasets are labeled to the image level (and check for match to [Andrey's spreadsheet](https://docs.google.com/spreadsheets/d/1sC90DolAvswDUJ1lNSf0sk_norR24LwzX2O4g9OxMZE/edit?usp=drive_link)).
|
| 43 |
|
| 44 |
# %%
|
| 45 |
+
df.dataset_name.value_counts()
|
| 46 |
|
| 47 |
# %%
|
| 48 |
+
df.groupby(["dataset_name"]).annotation_level.value_counts()
|
| 49 |
|
| 50 |
# %% [markdown]
|
| 51 |
+
# It seems all the unknown annotation level images are in NACTI (North American Camera Trap Images). At first glance I don't see annotation-level information on HF or on [their LILA page](https://lila.science/datasets/nacti); this will require more looking.
|
| 52 |
+
#
|
| 53 |
+
# Desert Lion Conservation Camera Traps & Trail Camera Images of New Zealand Animals are _not_ included in the [Hugging Face dataset](https://huggingface.co/datasets/society-ethics/lila_camera_traps).
|
| 54 |
+
#
|
| 55 |
+
# There are definitely more in [Andrey's spreadsheet](https://docs.google.com/spreadsheets/d/1sC90DolAvswDUJ1lNSf0sk_norR24LwzX2O4g9OxMZE/edit?usp=drive_link) that aren't included here. We'll have him go through those too.
|
| 56 |
|
| 57 |
# %%
|
| 58 |
+
df.sample(10)
|
| 59 |
|
| 60 |
# %% [markdown]
|
| 61 |
+
# Observe that we also now get multiple URL options; `url_aws` will likely be best/fastest for use with [`distributed-downloader`](https://github.com/Imageomics/distributed-downloader) to get the images.
|
| 62 |
|
| 63 |
# %%
|
| 64 |
+
df.info(show_counts = True)
|
| 65 |
|
| 66 |
# %% [markdown]
|
| 67 |
+
# The overall dataset has grown by about 3 million images; we'll see how much of this is non-empty. I'm encouraged that the number of non-null `scientific_name` values also seems to have grown by about 3 million; most of these also seem to have genus now.
|
| 68 |
#
|
| 69 |
+
# We'll definitely want to check on the scientific name choices where genus and species aren't available (similarly for other ranks), as scientific name is guaranteed as far up as kingdom (which is hopefully aligned with all non-empty images).
|
| 70 |
+
#
|
| 71 |
+
# No licensing info; we'll get that from HF or the datasets themselves (Andrey can check this; most seem to be under the [Community Data License Agreement (permissive variant)](https://cdla.io/permissive-1-0/)).
|
| 72 |
|
| 73 |
# %%
|
| 74 |
+
df.nunique()
|
| 75 |
|
| 76 |
# %% [markdown]
|
| 77 |
+
# We have 739 unique species indicated, though the 908 unique `scientific_name` values are likely more indicative of the diversity.
|
| 78 |
#
|
| 79 |
+
# It's also interesting to note that there are duplicate URLs here; these would be the indicators of multiple species in an image, as they correspond to the number of unique image IDs. We'll check this out once we remove the images labeled as "empty".
|
| 80 |
|
| 81 |
# %%
|
| 82 |
+
# Check for humans
|
| 83 |
+
df.loc[df.species == "homo sapien"]
|
| 84 |
|
| 85 |
# %% [markdown]
|
| 86 |
+
# Let's start by removing entries with `original_label`: `empty`.
|
| 87 |
|
| 88 |
# %%
|
| 89 |
+
df_cleaned = df.loc[df.original_label != "empty"].copy()
|
| 90 |
|
| 91 |
# %% [markdown]
|
| 92 |
+
# ## Save the Reduced Data (no more "empty" labels)
|
| 93 |
|
| 94 |
# %%
|
| 95 |
+
df_cleaned.to_csv("../data/lila_image_urls_and_labels.csv", index = False)
|
| 96 |
|
| 97 |
# %% [markdown]
|
| 98 |
+
# Let's check where we are with annotations now that we've removed all the images labeled as empty.
|
| 99 |
|
| 100 |
# %%
|
| 101 |
+
df_cleaned.groupby(["dataset_name"]).annotation_level.value_counts()
|
| 102 |
|
| 103 |
# %% [markdown]
|
| 104 |
+
# We started with 19,351,156 entries and are left with 10,965,902 after removing all those labeled `empty`, so more than half the images remain; that's an increase of about 2.5M over the last version.
|
| 105 |
#
|
| 106 |
+
# Note that there are still about 3.4 million entries that don't have a species label and 1.5 million that are missing a genus designation. 10,192,703 of them have scientific and common names, though! That's nearly all of them.
|
| 107 |
|
| 108 |
# %%
|
| 109 |
+
df_cleaned.info(show_counts = True)
|
| 110 |
|
| 111 |
# %%
|
| 112 |
+
df_cleaned.nunique()
|
| 113 |
|
| 114 |
# %%
|
| 115 |
+
print(df_cleaned.phylum.value_counts())
|
| 116 |
+
print()
|
| 117 |
+
print(df_cleaned["class"].value_counts())
|
| 118 |
|
| 119 |
# %% [markdown]
|
| 120 |
+
# We have 10,965,902 total rows but only 10,864,013 unique URLs, suggesting at most 101,889 images have more than one species in them (see the quick check below). That's only about 1% of our images here, and an even smaller fraction at the scale we're targeting for the next ToL dataset. It is interesting to note, though, and we should explore this more.
|
| 121 |
#
|
| 122 |
+
# I'm curious about the single "variety", since I thought that was more of a plant label and these are all animals.
|
| 123 |
+
#
|
| 124 |
+
# All images are in Animalia, as expected; we have 2 phyla represented and 8 classes:
|
| 125 |
+
# - Predominantly Chordata, and within that phylum, Mammalia is the vast majority, though Aves is about 10%.
|
| 126 |
+
# - Note that not every image with a phylum label has a class label.
|
| 127 |
+
# - Insecta, Malacostraca, Arachnida, and Diplopoda are all classes in the phylum Arthropoda.
|
| 128 |
+
#
|
| 129 |
+
# ### Label Multi-Species Images
|
| 130 |
+
# We'll go by both the URL and image ID, which do seem to correspond to the same images (for uniqueness).
|
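A quick check of the arithmetic behind the "at most 101,889" figure above (a sketch, using `url_aws` as the URL column; the printed values should match the counts quoted in the markdown):

```python
n_rows = len(df_cleaned)                  # 10,965,902 rows
n_urls = df_cleaned["url_aws"].nunique()  # 10,864,013 unique URLs
print(n_rows - n_urls)                    # 101,889: an upper bound on multi-species images
```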
| 131 |
|
| 132 |
# %%
|
| 133 |
+
df_cleaned["multi_species"] = df_cleaned.duplicated(subset = ["url_aws", "image_id"], keep = False)
|
| 134 |
|
| 135 |
+
df_cleaned.loc[df_cleaned["multi_species"]].nunique()
|
| 136 |
|
| 137 |
# %% [markdown]
|
| 138 |
+
# We've got just under 100K images that have multiple species. We can figure out how many species each of them has, and then move on to looking at images per sequence and other labeling info.
|
| 139 |
|
| 140 |
# %%
|
| 141 |
+
multi_sp_imgs = list(df_cleaned.loc[df_cleaned["multi_species"], "image_id"].unique())
|
| 142 |
|
| 143 |
# %%
|
| 144 |
+
for img in multi_sp_imgs:
|
| 145 |
+
df_cleaned.loc[df_cleaned["image_id"] == img, "num_species"] = df_cleaned.loc[df_cleaned["image_id"] == img].shape[0]
|
| 146 |
+
|
| 147 |
+
df_cleaned.head()
|
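The per-image loop above rescans the frame once per multi-species image. A vectorized sketch that should yield the same counts (assuming `image_id` uniquely identifies an image):

```python
# Count rows per image_id in one pass; single-species images get 1 directly,
# which matches the fill step applied later in the notebook.
df_cleaned["num_species"] = df_cleaned.groupby("image_id")["image_id"].transform("size")
```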
| 148 |
|
| 149 |
# %% [markdown]
|
| 150 |
+
# #### Save this to CSV now that we have those counts
|
| 151 |
|
| 152 |
# %%
|
| 153 |
+
df_cleaned.to_csv("../data/lila_image_urls_and_labels.csv", index = False)
|
| 154 |
|
| 155 |
# %%
|
| 156 |
+
df_cleaned.loc[df_cleaned["multi_species"]].head()
|
| 157 |
|
| 158 |
+
# %% [markdown]
|
| 159 |
+
# How many different species do we generally have when we have multiple species in an image?
|
| 160 |
|
| 161 |
# %%
|
| 162 |
+
df_cleaned.num_species.value_counts()
|
| 163 |
|
| 164 |
# %% [markdown]
|
| 165 |
+
# We have 97,567 images with 2 different species (most multi-species instances), 2,023 with 3 different species, and 92 with 4.
|
| 166 |
#
|
| 167 |
+
# We will want to dedicate some more time to exploring some of these taxonomic counts, but we'll first look at the number of unique taxa, by the Linnean 7-rank tuple (`unique_7_tuple`) and then by all available taxonomic labels (`unique_taxa`). We'll compare these to the number of unique scientific and common names, then perhaps add a count of the number of creatures based on one of those labels. At that point we may save another copy of this CSV and start a new analysis notebook.
|
| 168 |
|
| 169 |
# %%
|
| 170 |
+
df_cleaned.annotation_level.value_counts()
|
| 171 |
|
| 172 |
# %% [markdown]
|
| 173 |
+
# We've got ~3M labeled at the image level and another ~3M with unknown labeling (all from NACTI, which Andrey will check on), leaving ~5M labeled only at the sequence level. This _should_ give Jianyang something to work with to start exploring near-duplicate de-duplication.
|
| 174 |
#
|
| 175 |
+
# Let's update the non-multi-species images to show 1 in the `num_species` column, then move on to checking the taxonomy strings.
|
| 176 |
|
| 177 |
# %%
|
| 178 |
+
df_cleaned.loc[df_cleaned["num_species"].isna(), "num_species"] = 1.0
|
| 179 |
|
| 180 |
+
df_cleaned.num_species.value_counts()
|
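The masked assignment above can also be spelled with `fillna`; a minimal equivalent:

```python
# Same effect as the .loc assignment: missing counts become 1.0.
df_cleaned["num_species"] = df_cleaned["num_species"].fillna(1.0)
```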
| 181 |
|
| 182 |
# %% [markdown]
|
| 183 |
+
# ### Taxonomic String Exploration
|
| 184 |
|
| 185 |
# %%
|
| 186 |
+
lin_taxa = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
|
| 187 |
+
all_taxa = ['kingdom',
|
| 188 |
+
'phylum',
|
| 189 |
+
'subphylum',
|
| 190 |
+
'superclass',
|
| 191 |
+
'class',
|
| 192 |
+
'subclass',
|
| 193 |
+
'infraclass',
|
| 194 |
+
'superorder',
|
| 195 |
+
'order',
|
| 196 |
+
'suborder',
|
| 197 |
+
'infraorder',
|
| 198 |
+
'superfamily',
|
| 199 |
+
'family',
|
| 200 |
+
'subfamily',
|
| 201 |
+
'tribe',
|
| 202 |
+
'genus',
|
| 203 |
+
'species',
|
| 204 |
+
'subspecies',
|
| 205 |
+
'variety']
|
| 206 |
|
| 207 |
# %% [markdown]
|
| 208 |
+
# #### How many have all 7 Linnean ranks?
|
| 209 |
|
| 210 |
# %%
|
| 211 |
+
df_all_taxa = df_cleaned.dropna(subset = lin_taxa)
|
| 212 |
+
df_all_taxa[all_taxa].info(show_counts = True)
|
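A one-liner for the coverage figure discussed below (7,521,712 of 10,965,902 rows, about 69%):

```python
# Share of rows with a complete 7-rank Linnean tuple.
print(len(df_all_taxa) / len(df_cleaned))  # ~0.69
```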
| 213 |
|
| 214 |
# %% [markdown]
|
| 215 |
+
# That's pretty good coverage: 7,521,712 out of 10,965,902. It looks like many of them also have the other taxonomic ranks. Now how many different 7-tuples are there?
|
| 216 |
#
|
| 217 |
+
# #### How many unique 7-tuples?
|
| 218 |
|
| 219 |
# %%
|
| 220 |
+
# number of unique 7-tuples in the full dataset
|
| 221 |
+
df_cleaned['lin_duplicate'] = df_cleaned.duplicated(subset = lin_taxa, keep = 'first')
|
| 222 |
+
df_unique_lin_taxa = df_cleaned.loc[~df_cleaned['lin_duplicate']].copy()
|
| 223 |
+
df_unique_lin_taxa.info(show_counts = True)
|
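The duplicated-then-filter pattern above is equivalent to a single `drop_duplicates` call; a minimal sketch:

```python
# Keeps the first row for each distinct 7-tuple, same as filtering on ~lin_duplicate.
df_unique_lin_taxa = df_cleaned.drop_duplicates(subset = lin_taxa).copy()
```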
| 224 |
|
| 225 |
+
# %% [markdown]
|
| 226 |
+
# Interesting: we have 891 unique 7-tuple taxonomic strings, but one scientific name and common name seem to be missing.
|
| 227 |
+
# What's the uniqueness count here?
|
| 228 |
|
| 229 |
# %%
|
| 230 |
+
df_unique_lin_taxa.nunique()
|
| 231 |
|
| 232 |
+
# %% [markdown]
|
| 233 |
+
# They're across all datasets. We have 890 unique scientific names and 886 unique common names (from 885 original labels).
|
| 234 |
|
| 235 |
# %%
|
| 236 |
+
df_unique_lin_taxa.loc[(df_unique_lin_taxa["scientific_name"].isna()) | (df_unique_lin_taxa["common_name"].isna())]
|
| 237 |
|
| 238 |
# %% [markdown]
|
| 239 |
+
# It's a car...We need to remove cars...
|
| 240 |
|
| 241 |
# %%
|
| 242 |
+
df_cleaned.loc[df_cleaned["original_label"] == "car"].shape
|
| 243 |
|
| 244 |
# %%
|
| 245 |
+
df_cleaned.loc[df_cleaned["original_label"] == "car", "dataset_name"].value_counts()
|
| 246 |
|
| 247 |
# %% [markdown]
|
| 248 |
+
# #### How many unique full taxa (sub ranks included)?
|
| 249 |
|
| 250 |
# %%
|
| 251 |
+
# number of unique full-taxa tuples (sub-ranks included) in the full dataset
|
| 252 |
+
df_cleaned['full_duplicate'] = df_cleaned.duplicated(subset = all_taxa, keep = 'first')
|
| 253 |
+
df_unique_all_taxa = df_cleaned.loc[~df_cleaned['full_duplicate']].copy()
|
| 254 |
+
df_unique_all_taxa.info(show_counts = True)
|
| 255 |
|
| 256 |
+
# %% [markdown]
|
| 257 |
+
# When we consider the sub-ranks as well, we wind up with 909 unique taxa (still with one scientific and common name missing--the car!).
|
| 258 |
|
| 259 |
# %%
|
| 260 |
+
df_unique_all_taxa.nunique()
|
| 261 |
|
| 262 |
# %% [markdown]
|
| 263 |
+
# We have now captured all 908 unique scientific names, but only 901 of the 999 unique common names.
|
| 264 |
|
| 265 |
# %%
|
| 266 |
+
df_unique_all_taxa.loc[(df_unique_all_taxa["scientific_name"].isna()) | (df_unique_all_taxa["common_name"].isna())]
|
| 267 |
|
| 268 |
+
# %% [markdown]
|
| 269 |
+
# #### Let's remove those cars
|
| 270 |
|
| 271 |
# %%
|
| 272 |
+
df_cleaned = df_cleaned[df_cleaned["original_label"] != "car"].copy()
|
| 273 |
+
df_cleaned[["original_label", "scientific_name", "common_name", "kingdom"]].info(show_counts = True)
|
| 274 |
|
| 275 |
# %% [markdown]
|
| 276 |
+
# Now we have 10,961,185 instead of 10,965,902 images; they all have `original_label`, but only 10,192,703 of them have `scientific_name`, `common_name`, and `kingdom`. What are the `original_label`s for those ~800K images?
|
| 277 |
|
| 278 |
# %%
|
| 279 |
+
no_taxa = df_cleaned.loc[(df_cleaned["scientific_name"].isna()) & (df_cleaned["common_name"].isna()) & (df_cleaned["kingdom"].isna())].copy()
|
| 280 |
|
| 281 |
+
print(no_taxa[["dataset_name", "original_label"]].nunique())
|
| 282 |
+
no_taxa[["dataset_name", "original_label"]].info(show_counts = True)
|
| 283 |
|
| 284 |
# %% [markdown]
|
| 285 |
+
# What are these 24 other labels and how are the 768,482 images with them distributed across these 12 datasets?
|
| 286 |
|
| 287 |
# %%
|
| 288 |
+
no_taxa["original_label"].value_counts()
|
| 289 |
|
| 290 |
# %%
|
| 291 |
+
no_taxa["dataset_name"].value_counts()
|
| 292 |
|
| 293 |
# %%
|
| 294 |
+
no_taxa.groupby(["dataset_name"])["original_label"].value_counts()
|
| 295 |
|
| 296 |
# %% [markdown]
|
| 297 |
+
# Interesting. It seems like all of these should also be removed. Vegetation obstruction could of course be labeled in Plantae, but we're not going to be labeling 7K images for this project.
|
| 298 |
+
#
|
| 299 |
+
# Let's remove them; then we should have 10,192,703 images.
|
| 300 |
|
| 301 |
# %%
|
| 302 |
+
non_taxa_labels = list(no_taxa["original_label"].unique())
|
| 303 |
|
| 304 |
# %%
|
| 305 |
+
df_clean = df_cleaned.loc[~df_cleaned["original_label"].isin(non_taxa_labels)].copy()
|
| 306 |
+
df_clean.info(show_counts = True)
|
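Since `no_taxa` was defined as exactly the rows missing `scientific_name`, `common_name`, and `kingdom`, a simpler filter should land on the same 10,192,703 rows (a sketch; the equivalence holds only if no kept row shares an `original_label` with `no_taxa`):

```python
# Keep only rows that resolved to a kingdom.
df_clean_alt = df_cleaned.loc[df_cleaned["kingdom"].notna()].copy()
print(len(df_clean_alt))  # expected: 10,192,703
```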
| 307 |
|
| 308 |
# %%
|
| 309 |
+
df_clean.nunique()
|
| 310 |
|
| 311 |
# %% [markdown]
|
| 312 |
+
# Let's check out our top ten labels, scientific names, and common names. Then we'll save this cleaned metadata file.
|
| 313 |
|
| 314 |
# %%
|
| 315 |
+
df_clean["original_label"].value_counts()[:10]
|
| 316 |
|
| 317 |
# %%
|
| 318 |
+
df_clean["scientific_name"].value_counts()[:10]
|
| 319 |
|
| 320 |
# %%
|
| 321 |
+
df_clean["common_name"].value_counts()[:10]
|
| 322 |
|
| 323 |
+
# %% [markdown]
|
| 324 |
+
# There are also 257,159 humans in here! Glad the number agrees across labels. We'll probably need to remove the humans, though I may save a copy with them included on the HF repo (it is just our dev repo). Which datasets have them? I thought humans were filtered out previously (though I could be mistaken, as they seem to be in 15 of the 20 datasets).
|
| 325 |
|
| 326 |
+
# %%
|
| 327 |
+
df_clean.loc[df_clean["original_label"] == "human", "dataset_name"].value_counts()
|
| 328 |
|
| 329 |
# %% [markdown]
|
| 330 |
+
# What do human labels look like (as in do they have the full taxa structure)?
|
| 331 |
|
| 332 |
# %%
|
| 333 |
+
df_clean.loc[df_clean["original_label"] == "human"].sample(5)
|
| 334 |
|
| 335 |
+
# %% [markdown]
|
| 336 |
+
# It does seem to have the full taxa... interesting.
|
| 337 |
|
| 338 |
# %%
|
| 339 |
+
df_clean.to_csv("../data/lila_image_urls_and_labels_wHumans.csv", index = False)
|
| 340 |
|
| 341 |
# %%
|
| 342 |
+
df_clean.loc[df_clean["original_label"] != "human"].to_csv("../data/lila_image_urls_and_labels.csv", index = False)
|
| 343 |
|
| 344 |
# %%
|
| 345 |
+
taxa = [col for col in list(df_clean.columns) if col in all_taxa or col == "original_label"]
|
| 346 |
|
| 347 |
+
df_taxa = df_clean[taxa].copy()
|
| 348 |
+
df_taxa.loc[df_taxa["original_label"] == "human"].sample(7)
|
| 349 |
|
| 350 |
# %%
|
| 351 |
+
df_clean.loc[df_clean["original_label"] != "human"].info(show_counts = True)
|
| 352 |
|
| 353 |
# %%
|
| 354 |
+
df_clean.loc[df_clean["original_label"] != "human"].nunique()
|
| 355 |
+
|
| 356 |
+
# %% [markdown]
|
| 357 |
+
# We have 1,198,696 distinct sequence IDs for the 9,849,119 unique image IDs, suggesting an average of about 8.2 images per sequence?
|
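That average is just the ratio 9,849,119 / 1,198,696 ≈ 8.2; a sketch to inspect the actual distribution (the `sequence_id` column name is an assumption, not confirmed above):

```python
# Hypothetical column name `sequence_id`; adjust to the actual metadata schema.
no_humans = df_clean.loc[df_clean["original_label"] != "human"]
imgs_per_seq = no_humans.groupby("sequence_id")["image_id"].nunique()
print(imgs_per_seq.describe())  # mean should land near 8.2
```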
| 358 |
|
| 359 |
# %%
|
| 360 |
+
df_clean.loc[df_clean["original_label"] != "human", "annotation_level"].value_counts()
|
| 361 |
|
| 362 |
# %% [markdown]
|
| 363 |
+
# #### Check Number of Images per Scientific Name?
|
| 364 |
|
| 365 |
# %%
|
| 366 |
|
| 367 |
+
# %%
|
| 368 |
|
| 369 |
# %%
|
| 370 |
+
sns.histplot(df_clean.loc[df_clean["original_label"] != "human"], y = 'class')
|
| 371 |
|
| 372 |
# %%
|
| 373 |
+
sns.histplot(df_clean.loc[df_clean["original_label"] != "human"], y = 'order')
|
| 374 |
|
| 375 |
# %%
|