40 | 40 | ]
41 | 41 | },
42 | 42 | {
   | 43 | + "attachments": {},
43 | 44 | "cell_type": "markdown",
44 | 45 | "metadata": {
45 | 46 | "id": "IgYKebt871EK"

59 | 60 | ]
60 | 61 | },
61 | 62 | {
   | 63 | + "attachments": {},
62 | 64 | "cell_type": "markdown",
63 | 65 | "metadata": {
64 | 66 | "id": "6JTRoM7E71EU"

95 | 97 | ]
96 | 98 | },
97 | 99 | {
   | 100 | + "attachments": {},
98 | 101 | "cell_type": "markdown",
99 | 102 | "metadata": {
100 | 103 | "id": "6VKVqLb371EV"

130 | 133 | ]
131 | 134 | },
132 | 135 | {
    | 136 | + "attachments": {},
133 | 137 | "cell_type": "markdown",
134 | 138 | "metadata": {
135 | 139 | "id": "cREmhMWJ71EX"

143 | 147 | ]
144 | 148 | },
145 | 149 | {
    | 150 | + "attachments": {},
146 | 151 | "cell_type": "markdown",
147 | 152 | "metadata": {
148 | 153 | "id": "1NhotGiT71EY"

199 | 204 | ]
200 | 205 | },
201 | 206 | {
    | 207 | + "attachments": {},
202 | 208 | "cell_type": "markdown",
203 | 209 | "metadata": {
204 | 210 | "id": "LgTG6buf71Ea"

256 | 262 | ]
257 | 263 | },
258 | 264 | {
    | 265 | + "attachments": {},
259 | 266 | "cell_type": "markdown",
260 | 267 | "metadata": {
261 | 268 | "id": "SzFGcrhv71Ed"

303 | 310 | "# Get all faces from the testing dataset\n",
304 | 311 | "test_imgs = test_loader.get_all_faces()\n",
305 | 312 | "\n",
306 |     | - "# Call the Capsa-wrapped classifier to generate outputs: predictions, uncertainty, and bias!\n",
307 |     | - "predictions, uncertainty, bias = wrapped_model.predict(test_imgs, batch_size=512)"
    | 313 | + "# Call the Capsa-wrapped classifier to generate outputs: a RiskTensor dictionary consisting of predictions, uncertainty, and bias!\n",
    | 314 | + "out = wrapped_model.predict(test_imgs, batch_size=512)\n"
308 | 315 | ]
309 | 316 | },
310 | 317 | {
    | 318 | + "attachments": {},
311 | 319 | "cell_type": "markdown",
312 | 320 | "metadata": {
313 | 321 | "id": "629ng-_H6WOk"
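For readers skimming this commit: the hunk above (and the later hunks below) replaces the old three-tuple return of wrapped_model.predict with a single capsa RiskTensor whose fields are read as attributes. A minimal sketch of the post-change access pattern, assuming only what the diff itself shows (a y_hat attribute for predictions, epistemic for uncertainty, and bias for representation scores, each convertible with .numpy()):

    import numpy as np

    # The wrapped classifier now returns one RiskTensor instead of a tuple
    out = wrapped_model.predict(test_imgs, batch_size=512)

    predictions = out.y_hat.numpy()      # formerly the first return value
    uncertainty = out.epistemic.numpy()  # formerly the second return value
    bias = out.bias.numpy()              # formerly the third return value

    # The same attribute access drives the sorting hunks below, e.g.:
    indices = np.argsort(out.bias, axis=None)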
329 | 337 | "### Analyzing representation bias scores ###\n",
330 | 338 | "\n",
331 | 339 | "# Sort according to lowest to highest representation scores\n",
332 |     | - "indices = np.argsort(bias, axis=None) # sort the score values themselves\n",
    | 340 | + "indices = np.argsort(out.bias, axis=None) # sort the score values themselves\n",
333 | 341 | "sorted_images = test_imgs[indices] # sort images from lowest to highest representations\n",
334 |     | - "sorted_biases = bias[indices] # order the representation bias scores\n",
335 |     | - "sorted_preds = predictions[indices] # order the prediction values\n",
    | 342 | + "sorted_biases = out.bias.numpy()[indices] # order the representation bias scores\n",
    | 343 | + "sorted_preds = out.y_hat.numpy()[indices] # order the prediction values\n",
336 | 344 | "\n",
337 | 345 | "\n",
338 | 346 | "# Visualize the 20 images with the lowest and highest representation in the test dataset\n",

345 | 353 | ]
346 | 354 | },
347 | 355 | {
    | 356 | + "attachments": {},
348 | 357 | "cell_type": "markdown",
349 | 358 | "metadata": {
350 | 359 | "id": "-JYmGMJF71Ef"

368 | 377 | ]
369 | 378 | },
370 | 379 | {
    | 380 | + "attachments": {},
371 | 381 | "cell_type": "markdown",
372 | 382 | "metadata": {
373 | 383 | "id": "i8ERzg2-71Ef"

389 | 399 | ]
390 | 400 | },
391 | 401 | {
    | 402 | + "attachments": {},
392 | 403 | "cell_type": "markdown",
393 | 404 | "metadata": {
394 | 405 | "id": "cRNV-3SU71Eg"

404 | 415 | ]
405 | 416 | },
406 | 417 | {
    | 418 | + "attachments": {},
407 | 419 | "cell_type": "markdown",
408 | 420 | "metadata": {
409 | 421 | "id": "ww5lx7ue71Eg"

420 | 432 | ]
421 | 433 | },
422 | 434 | {
    | 435 | + "attachments": {},
423 | 436 | "cell_type": "markdown",
424 | 437 | "metadata": {
425 | 438 | "id": "NEfeWo2p7wKm"

442 | 455 | "### Analyzing epistemic uncertainty estimates ###\n",
443 | 456 | "\n",
444 | 457 | "# Sort according to epistemic uncertainty estimates\n",
445 |     | - "epistemic_indices = np.argsort(uncertainty, axis=None) # sort the uncertainty values\n",
    | 458 | + "epistemic_indices = np.argsort(out.epistemic, axis=None) # sort the uncertainty values\n",
446 | 459 | "epistemic_images = test_imgs[epistemic_indices] # sort images from lowest to highest uncertainty\n",
447 |     | - "sorted_epistemic = uncertainty[epistemic_indices] # order the uncertainty scores\n",
448 |     | - "sorted_epistemic_preds = predictions[epistemic_indices] # order the prediction values\n",
    | 460 | + "sorted_epistemic = out.epistemic.numpy()[epistemic_indices] # order the uncertainty scores\n",
    | 461 | + "sorted_epistemic_preds = out.y_hat.numpy()[epistemic_indices] # order the prediction values\n",
449 | 462 | "\n",
450 | 463 | "\n",
451 | 464 | "# Visualize the 20 images with the LEAST and MOST epistemic uncertainty\n",

458 | 471 | ]
459 | 472 | },
460 | 473 | {
    | 474 | + "attachments": {},
461 | 475 | "cell_type": "markdown",
462 | 476 | "metadata": {
463 | 477 | "id": "L0dA8EyX71Eh"

481 | 495 | ]
482 | 496 | },
483 | 497 | {
    | 498 | + "attachments": {},
484 | 499 | "cell_type": "markdown",
485 | 500 | "metadata": {
486 | 501 | "id": "iyn0IE6x71Eh"

496 | 511 | ]
497 | 512 | },
498 | 513 | {
    | 514 | + "attachments": {},
499 | 515 | "cell_type": "markdown",
500 | 516 | "metadata": {
501 | 517 | "id": "XbwRbesM71Eh"

561 | 577 | "\n",
562 | 578 | " # After the epoch is done, recompute data sampling proabilities \n",
563 | 579 | " # according to the inverse of the bias\n",
564 |     | - " pred, unc, bias = wrapper(train_imgs)\n",
    | 580 | + " out = wrapper(train_imgs)\n",
565 | 581 | "\n",
566 | 582 | " # Increase the probability of sampling under-represented datapoints by setting \n",
567 | 583 | " # the probability to the **inverse** of the biases\n",
568 |     | - " inverse_bias = 1.0 / (bias.numpy() + 1e-7)\n",
    | 584 | + " inverse_bias = 1.0 / (np.mean(out.bias.numpy(),axis=-1) + 1e-7)\n",
569 | 585 | "\n",
570 | 586 | " # Normalize the inverse biases in order to convert them to probabilities\n",
571 | 587 | " p_faces = inverse_bias / np.sum(inverse_bias)\n",
|
576 | 592 | },
|
577 | 593 | {
|
| 594 | + "attachments": {}, |
578 | 595 | "cell_type": "markdown",
|
579 | 596 | "metadata": {
|
580 | 597 | "id": "SwXrAeBo71Ej"
|
|
598 | 615 | "### Evaluation of debiased model ###\n",
|
599 | 616 | "\n",
|
600 | 617 | "# Get classification predictions, uncertainties, and representation bias scores\n",
|
601 |
| - "pred, unc, bias = wrapper.predict(test_imgs)\n", |
| 618 | + "out = wrapper.predict(test_imgs)\n", |
602 | 619 | "\n",
|
603 | 620 | "# Sort according to lowest to highest representation scores\n",
|
604 |
| - "indices = np.argsort(bias, axis=None)\n", |
| 621 | + "indices = np.argsort(out.bias, axis=None)\n", |
605 | 622 | "bias_images = test_imgs[indices] # sort the images\n",
|
606 |
| - "sorted_bias = bias[indices] # sort the representation bias scores\n", |
607 |
| - "sorted_bias_preds = pred[indices] # sort the predictions\n", |
| 623 | + "sorted_bias = out.bias.numpy()[indices] # sort the representation bias scores\n", |
| 624 | + "sorted_bias_preds = out.y_hat.numpy()[indices] # sort the predictions\n", |
608 | 625 | "\n",
|
609 | 626 | "# Plot the representation bias vs. the accuracy\n",
|
610 | 627 | "plt.xlabel(\"Density (Representation)\")\n",
|
|
613 | 630 | ]
|
614 | 631 | },
|
615 | 632 | {
|
| 633 | + "attachments": {}, |
616 | 634 | "cell_type": "markdown",
|
617 | 635 | "metadata": {
|
618 | 636 | "id": "d1cEEnII71Ej"
|
|
681 | 699 | "name": "python",
|
682 | 700 | "nbconvert_exporter": "python",
|
683 | 701 | "pygments_lexer": "ipython3",
|
684 |
| - "version": "3.9.6 (default, Oct 18 2022, 12:41:40) \n[Clang 14.0.0 (clang-1400.0.29.202)]" |
| 702 | + "version": "3.9.16" |
685 | 703 | },
|
686 | 704 | "vscode": {
|
687 | 705 | "interpreter": {
|
|