From 6fcd8259028dc518973ee1f30c72bdfe66789300 Mon Sep 17 00:00:00 2001 From: Thalles Silva Date: Sun, 17 Jan 2021 20:12:27 -0300 Subject: [PATCH 1/4] Update mini_batch_logistic_regression_evaluator.ipynb --- feature_eval/mini_batch_logistic_regression_evaluator.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb index 032cfcb..f69fd40 100644 --- a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb +++ b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb @@ -20,7 +20,7 @@ "version": "3.6.6" }, "colab": { - "name": "Copy of mini-batch-logistic-regression-evaluator.ipynb", + "name": "Mini-batch-logistic-regression-evaluator.ipynb", "provenance": [] }, "accelerator": "GPU", @@ -839,4 +839,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From d1d59400fe7d3de6698a8c99df8b4608952fc24e Mon Sep 17 00:00:00 2001 From: Thalles Silva Date: Sun, 17 Jan 2021 20:27:34 -0300 Subject: [PATCH 2/4] Created using Colaboratory --- ..._batch_logistic_regression_evaluator.ipynb | 509 +++++------------- 1 file changed, 131 insertions(+), 378 deletions(-) diff --git a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb index f69fd40..f64e920 100644 --- a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb +++ b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb @@ -20,261 +20,26 @@ "version": "3.6.6" }, "colab": { - "name": "Mini-batch-logistic-regression-evaluator.ipynb", - "provenance": [] + "name": "Copy of mini-batch-logistic-regression-evaluator.ipynb", + "provenance": [], + "include_colab_link": true }, "accelerator": "GPU", "widgets": { - "application/vnd.jupyter.widget-state+json": { - "bcf2585d31644e0f86569e604b2e635b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_2612abdc916d47418dda7287807a00ce", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_027c3ca8839846fcae9d6bb23fb10399", - "IPY_MODEL_1d09572d2433498caa268567c838e640" - ] - } - }, - "2612abdc916d47418dda7287807a00ce": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - 
"object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "027c3ca8839846fcae9d6bb23fb10399": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_08cddf6f231a4e89ab8e1e026cf11796", - "_dom_classes": [], - "description": "", - "_model_name": "FloatProgressModel", - "bar_style": "info", - "max": 1, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 1, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_75267826defa4565be4bed232272434e" - } - }, - "1d09572d2433498caa268567c838e640": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_8c189a0cd687479dba885a9c2d47fb64", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 2640404480/? [01:10<00:00, 98486594.59it/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_b6528931de654b3c85b94bec14f4891b" - } - }, - "08cddf6f231a4e89ab8e1e026cf11796": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "75267826defa4565be4bed232272434e": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "8c189a0cd687479dba885a9c2d47fb64": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "DescriptionStyleModel", - "description_width": "", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "_model_module": "@jupyter-widgets/controls" - } - }, - 
"b6528931de654b3c85b94bec14f4891b": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - } - } + "application/vnd.jupyter.widget-state+json": {} } }, "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, { "cell_type": "code", "metadata": { @@ -285,13 +50,8 @@ "import sys\n", "import numpy as np\n", "import os\n", - "from sklearn.neighbors import KNeighborsClassifier\n", "import yaml\n", "import matplotlib.pyplot as plt\n", - "from sklearn.decomposition import PCA\n", - "from sklearn.linear_model import LogisticRegression\n", - "from sklearn import preprocessing\n", - "import importlib.util\n", "import torchvision" ], "execution_count": null, @@ -522,7 +282,7 @@ "elif config.arch == 'resnet50':\n", " model = torchvision.models.resnet50(pretrained=False, num_classes=10).to(device)" ], - "execution_count": null, + "execution_count": 11, "outputs": [] }, { @@ -542,7 +302,7 @@ " state_dict[k[len(\"backbone.\"):]] = state_dict[k]\n", " del state_dict[k]" ], - "execution_count": null, + "execution_count": 12, "outputs": [] }, { @@ -554,7 +314,7 @@ "log = model.load_state_dict(state_dict, strict=False)\n", "assert log.missing_keys == ['fc.weight', 'fc.bias']" ], - "execution_count": null, + "execution_count": 13, "outputs": [] }, { @@ -563,19 +323,12 @@ "id": "_GC0a14uWRr6", "colab": { "base_uri": "https://localhost:8080/", - "height": 117, + "height": 102, "referenced_widgets": [ - "bcf2585d31644e0f86569e604b2e635b", - "2612abdc916d47418dda7287807a00ce", - "027c3ca8839846fcae9d6bb23fb10399", - "1d09572d2433498caa268567c838e640", - "08cddf6f231a4e89ab8e1e026cf11796", - "75267826defa4565be4bed232272434e", - "8c189a0cd687479dba885a9c2d47fb64", - "b6528931de654b3c85b94bec14f4891b" + "48ebf2f69d1f4f5a9208cd2923eb5eac" ] }, - "outputId": "56db3fac-10cc-4985-932d-878375ccd18f" + "outputId": "6c3b86ad-b568-4c68-c1fb-1f7b2abbb6aa" }, "source": [ "if config.dataset_name == 'cifar10':\n", @@ -584,7 +337,7 @@ " train_loader, test_loader = get_stl10_data_loaders(download=True)\n", "print(\"Dataset:\", config.dataset_name)" ], - "execution_count": null, + "execution_count": 14, "outputs": [ { "output_type": "stream", @@ -597,9 +350,9 @@ "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "bcf2585d31644e0f86569e604b2e635b", - "version_minor": 0, - "version_major": 2 + 
"model_id": "48ebf2f69d1f4f5a9208cd2923eb5eac", + "version_major": 2, + "version_minor": 0 }, "text/plain": [ "HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))" @@ -634,7 +387,7 @@ "parameters = list(filter(lambda p: p.requires_grad, model.parameters()))\n", "assert len(parameters) == 2 # fc.weight, fc.bias" ], - "execution_count": null, + "execution_count": 15, "outputs": [] }, { @@ -646,7 +399,7 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0008)\n", "criterion = torch.nn.CrossEntropyLoss().to(device)" ], - "execution_count": null, + "execution_count": 16, "outputs": [] }, { @@ -671,7 +424,7 @@ " res.append(correct_k.mul_(100.0 / batch_size))\n", " return res" ], - "execution_count": null, + "execution_count": 17, "outputs": [] }, { @@ -681,7 +434,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "95b285c8-2b26-4d2c-ccc3-bb9111871c8d" + "outputId": "d6127d8e-836f-4e69-a344-fee7e836d63a" }, "source": [ "epochs = 100\n", @@ -717,111 +470,111 @@ " top5_accuracy /= (counter + 1)\n", " print(f\"Epoch {epoch}\\tTop1 Train accuracy {top1_train_accuracy.item()}\\tTop1 Test accuracy: {top1_accuracy.item()}\\tTop5 test acc: {top5_accuracy.item()}\")" ], - "execution_count": null, + "execution_count": 18, "outputs": [ { "output_type": "stream", "text": [ - "Top1 Train accuracy 29.47265625\tTop1 Test accuracy: 42.4560546875\tTop5 test acc: 92.41943359375\n", - "Top1 Train accuracy 49.47265625\tTop1 Test accuracy: 53.662109375\tTop5 test acc: 96.15478515625\n", - "Top1 Train accuracy 56.85546875\tTop1 Test accuracy: 57.92236328125\tTop5 test acc: 96.74072265625\n", - "Top1 Train accuracy 59.3359375\tTop1 Test accuracy: 59.9365234375\tTop5 test acc: 97.021484375\n", - "Top1 Train accuracy 60.8984375\tTop1 Test accuracy: 61.1572265625\tTop5 test acc: 97.15576171875\n", - "Top1 Train accuracy 61.89453125\tTop1 Test accuracy: 61.8408203125\tTop5 test acc: 97.2900390625\n", - "Top1 Train accuracy 62.48046875\tTop1 Test accuracy: 62.5244140625\tTop5 test acc: 97.3388671875\n", - "Top1 Train accuracy 63.125\tTop1 Test accuracy: 63.037109375\tTop5 test acc: 97.44873046875\n", - "Top1 Train accuracy 64.4140625\tTop1 Test accuracy: 63.39111328125\tTop5 test acc: 97.54638671875\n", - "Top1 Train accuracy 64.86328125\tTop1 Test accuracy: 63.85498046875\tTop5 test acc: 97.5830078125\n", - "Top1 Train accuracy 65.15625\tTop1 Test accuracy: 64.0869140625\tTop5 test acc: 97.65625\n", - "Top1 Train accuracy 65.56640625\tTop1 Test accuracy: 64.34326171875\tTop5 test acc: 97.69287109375\n", - "Top1 Train accuracy 65.859375\tTop1 Test accuracy: 64.48974609375\tTop5 test acc: 97.7294921875\n", - "Top1 Train accuracy 66.03515625\tTop1 Test accuracy: 64.83154296875\tTop5 test acc: 97.75390625\n", - "Top1 Train accuracy 66.171875\tTop1 Test accuracy: 65.02685546875\tTop5 test acc: 97.79052734375\n", - "Top1 Train accuracy 66.484375\tTop1 Test accuracy: 65.46630859375\tTop5 test acc: 97.7783203125\n", - "Top1 Train accuracy 66.953125\tTop1 Test accuracy: 65.66162109375\tTop5 test acc: 97.8515625\n", - "Top1 Train accuracy 67.2265625\tTop1 Test accuracy: 65.91796875\tTop5 test acc: 97.93701171875\n", - "Top1 Train accuracy 67.48046875\tTop1 Test accuracy: 65.97900390625\tTop5 test acc: 97.91259765625\n", - "Top1 Train accuracy 67.8125\tTop1 Test accuracy: 66.11328125\tTop5 test acc: 97.93701171875\n", - "Top1 Train accuracy 68.046875\tTop1 Test accuracy: 66.3330078125\tTop5 test acc: 97.9736328125\n", - "Top1 Train accuracy 
68.45703125\tTop1 Test accuracy: 66.5283203125\tTop5 test acc: 97.94921875\n", - "Top1 Train accuracy 68.59375\tTop1 Test accuracy: 66.63818359375\tTop5 test acc: 97.94921875\n", - "Top1 Train accuracy 68.7890625\tTop1 Test accuracy: 66.748046875\tTop5 test acc: 97.93701171875\n", - "Top1 Train accuracy 69.00390625\tTop1 Test accuracy: 66.90673828125\tTop5 test acc: 97.9248046875\n", - "Top1 Train accuracy 69.21875\tTop1 Test accuracy: 67.0654296875\tTop5 test acc: 97.9736328125\n", - "Top1 Train accuracy 69.35546875\tTop1 Test accuracy: 67.0654296875\tTop5 test acc: 97.9736328125\n", - "Top1 Train accuracy 69.66796875\tTop1 Test accuracy: 67.2119140625\tTop5 test acc: 97.93701171875\n", - "Top1 Train accuracy 69.765625\tTop1 Test accuracy: 67.24853515625\tTop5 test acc: 97.9736328125\n", - "Top1 Train accuracy 69.82421875\tTop1 Test accuracy: 67.4072265625\tTop5 test acc: 97.98583984375\n", - "Top1 Train accuracy 69.9609375\tTop1 Test accuracy: 67.431640625\tTop5 test acc: 97.98583984375\n", - "Top1 Train accuracy 70.09765625\tTop1 Test accuracy: 67.4560546875\tTop5 test acc: 97.998046875\n", - "Top1 Train accuracy 70.15625\tTop1 Test accuracy: 67.44384765625\tTop5 test acc: 98.01025390625\n", - "Top1 Train accuracy 70.29296875\tTop1 Test accuracy: 67.54150390625\tTop5 test acc: 98.0224609375\n", - "Top1 Train accuracy 70.41015625\tTop1 Test accuracy: 67.61474609375\tTop5 test acc: 98.05908203125\n", - "Top1 Train accuracy 70.5078125\tTop1 Test accuracy: 67.67578125\tTop5 test acc: 98.0712890625\n", - "Top1 Train accuracy 70.64453125\tTop1 Test accuracy: 67.73681640625\tTop5 test acc: 98.08349609375\n", - "Top1 Train accuracy 70.859375\tTop1 Test accuracy: 67.76123046875\tTop5 test acc: 98.0712890625\n", - "Top1 Train accuracy 70.8984375\tTop1 Test accuracy: 67.88330078125\tTop5 test acc: 98.08349609375\n", - "Top1 Train accuracy 71.07421875\tTop1 Test accuracy: 67.95654296875\tTop5 test acc: 98.095703125\n", - "Top1 Train accuracy 71.11328125\tTop1 Test accuracy: 67.93212890625\tTop5 test acc: 98.1201171875\n", - "Top1 Train accuracy 71.2890625\tTop1 Test accuracy: 68.0419921875\tTop5 test acc: 98.10791015625\n", - "Top1 Train accuracy 71.3671875\tTop1 Test accuracy: 68.10302734375\tTop5 test acc: 98.13232421875\n", - "Top1 Train accuracy 71.42578125\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.13232421875\n", - "Top1 Train accuracy 71.4453125\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.13232421875\n", - "Top1 Train accuracy 71.50390625\tTop1 Test accuracy: 68.1640625\tTop5 test acc: 98.1201171875\n", - "Top1 Train accuracy 71.484375\tTop1 Test accuracy: 68.2373046875\tTop5 test acc: 98.14453125\n", - "Top1 Train accuracy 71.6015625\tTop1 Test accuracy: 68.34716796875\tTop5 test acc: 98.15673828125\n", - "Top1 Train accuracy 71.7578125\tTop1 Test accuracy: 68.39599609375\tTop5 test acc: 98.15673828125\n", - "Top1 Train accuracy 71.89453125\tTop1 Test accuracy: 68.37158203125\tTop5 test acc: 98.20556640625\n", - "Top1 Train accuracy 72.01171875\tTop1 Test accuracy: 68.4326171875\tTop5 test acc: 98.20556640625\n", - "Top1 Train accuracy 72.1484375\tTop1 Test accuracy: 68.44482421875\tTop5 test acc: 98.2177734375\n", - "Top1 Train accuracy 72.1875\tTop1 Test accuracy: 68.51806640625\tTop5 test acc: 98.25439453125\n", - "Top1 Train accuracy 72.28515625\tTop1 Test accuracy: 68.603515625\tTop5 test acc: 98.2421875\n", - "Top1 Train accuracy 72.36328125\tTop1 Test accuracy: 68.5791015625\tTop5 test acc: 98.2666015625\n", - "Top1 Train accuracy 72.5390625\tTop1 Test accuracy: 
68.61572265625\tTop5 test acc: 98.2666015625\n", - "Top1 Train accuracy 72.59765625\tTop1 Test accuracy: 68.64013671875\tTop5 test acc: 98.2666015625\n", - "Top1 Train accuracy 73.02734375\tTop1 Test accuracy: 68.7255859375\tTop5 test acc: 98.25439453125\n", - "Top1 Train accuracy 73.18359375\tTop1 Test accuracy: 68.76220703125\tTop5 test acc: 98.2666015625\n", - "Top1 Train accuracy 73.26171875\tTop1 Test accuracy: 68.8232421875\tTop5 test acc: 98.291015625\n", - "Top1 Train accuracy 73.359375\tTop1 Test accuracy: 68.85986328125\tTop5 test acc: 98.27880859375\n", - "Top1 Train accuracy 73.45703125\tTop1 Test accuracy: 68.8720703125\tTop5 test acc: 98.32763671875\n", - "Top1 Train accuracy 73.49609375\tTop1 Test accuracy: 68.9208984375\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 73.53515625\tTop1 Test accuracy: 68.8720703125\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 73.53515625\tTop1 Test accuracy: 68.9208984375\tTop5 test acc: 98.3642578125\n", - "Top1 Train accuracy 73.65234375\tTop1 Test accuracy: 69.00634765625\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 73.76953125\tTop1 Test accuracy: 69.0185546875\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 73.9453125\tTop1 Test accuracy: 69.0673828125\tTop5 test acc: 98.35205078125\n", - "Top1 Train accuracy 74.00390625\tTop1 Test accuracy: 69.1162109375\tTop5 test acc: 98.35205078125\n", - "Top1 Train accuracy 74.0625\tTop1 Test accuracy: 69.140625\tTop5 test acc: 98.3642578125\n", - "Top1 Train accuracy 74.12109375\tTop1 Test accuracy: 69.17724609375\tTop5 test acc: 98.3642578125\n", - "Top1 Train accuracy 74.21875\tTop1 Test accuracy: 69.20166015625\tTop5 test acc: 98.35205078125\n", - "Top1 Train accuracy 74.21875\tTop1 Test accuracy: 69.2626953125\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 74.23828125\tTop1 Test accuracy: 69.3359375\tTop5 test acc: 98.33984375\n", - "Top1 Train accuracy 74.23828125\tTop1 Test accuracy: 69.37255859375\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.2578125\tTop1 Test accuracy: 69.42138671875\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.27734375\tTop1 Test accuracy: 69.482421875\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.375\tTop1 Test accuracy: 69.51904296875\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.39453125\tTop1 Test accuracy: 69.6044921875\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.43359375\tTop1 Test accuracy: 69.6044921875\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.43359375\tTop1 Test accuracy: 69.6044921875\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.4921875\tTop1 Test accuracy: 69.64111328125\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.5703125\tTop1 Test accuracy: 69.7021484375\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.66796875\tTop1 Test accuracy: 69.775390625\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.6875\tTop1 Test accuracy: 69.775390625\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.74609375\tTop1 Test accuracy: 69.76318359375\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.74609375\tTop1 Test accuracy: 69.78759765625\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 74.84375\tTop1 Test accuracy: 69.81201171875\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 74.94140625\tTop1 Test accuracy: 69.88525390625\tTop5 test acc: 98.32763671875\n", - "Top1 Train accuracy 75.0390625\tTop1 Test accuracy: 69.8974609375\tTop5 test acc: 
98.32763671875\n", - "Top1 Train accuracy 75.05859375\tTop1 Test accuracy: 69.921875\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 75.078125\tTop1 Test accuracy: 69.95849609375\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 75.15625\tTop1 Test accuracy: 69.921875\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 75.21484375\tTop1 Test accuracy: 69.9462890625\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 75.17578125\tTop1 Test accuracy: 69.93408203125\tTop5 test acc: 98.30322265625\n", - "Top1 Train accuracy 75.17578125\tTop1 Test accuracy: 69.98291015625\tTop5 test acc: 98.291015625\n", - "Top1 Train accuracy 75.234375\tTop1 Test accuracy: 69.95849609375\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 75.234375\tTop1 Test accuracy: 69.98291015625\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 75.2734375\tTop1 Test accuracy: 70.00732421875\tTop5 test acc: 98.3154296875\n", - "Top1 Train accuracy 75.37109375\tTop1 Test accuracy: 70.01953125\tTop5 test acc: 98.3154296875\n" + "Epoch 0\tTop1 Train accuracy 27.890625\tTop1 Test accuracy: 42.05322265625\tTop5 test acc: 93.29833984375\n", + "Epoch 1\tTop1 Train accuracy 49.921875\tTop1 Test accuracy: 54.45556640625\tTop5 test acc: 96.1181640625\n", + "Epoch 2\tTop1 Train accuracy 57.3828125\tTop1 Test accuracy: 58.9599609375\tTop5 test acc: 96.9482421875\n", + "Epoch 3\tTop1 Train accuracy 60.01953125\tTop1 Test accuracy: 60.38818359375\tTop5 test acc: 97.03369140625\n", + "Epoch 4\tTop1 Train accuracy 61.7578125\tTop1 Test accuracy: 61.572265625\tTop5 test acc: 97.1923828125\n", + "Epoch 5\tTop1 Train accuracy 62.91015625\tTop1 Test accuracy: 62.21923828125\tTop5 test acc: 97.30224609375\n", + "Epoch 6\tTop1 Train accuracy 63.57421875\tTop1 Test accuracy: 62.6220703125\tTop5 test acc: 97.4365234375\n", + "Epoch 7\tTop1 Train accuracy 64.12109375\tTop1 Test accuracy: 63.18359375\tTop5 test acc: 97.55859375\n", + "Epoch 8\tTop1 Train accuracy 64.82421875\tTop1 Test accuracy: 63.51318359375\tTop5 test acc: 97.57080078125\n", + "Epoch 9\tTop1 Train accuracy 65.17578125\tTop1 Test accuracy: 63.80615234375\tTop5 test acc: 97.59521484375\n", + "Epoch 10\tTop1 Train accuracy 65.5859375\tTop1 Test accuracy: 64.14794921875\tTop5 test acc: 97.6318359375\n", + "Epoch 11\tTop1 Train accuracy 65.80078125\tTop1 Test accuracy: 64.51416015625\tTop5 test acc: 97.61962890625\n", + "Epoch 12\tTop1 Train accuracy 66.03515625\tTop1 Test accuracy: 64.70947265625\tTop5 test acc: 97.69287109375\n", + "Epoch 13\tTop1 Train accuracy 66.42578125\tTop1 Test accuracy: 64.88037109375\tTop5 test acc: 97.705078125\n", + "Epoch 14\tTop1 Train accuracy 66.9140625\tTop1 Test accuracy: 65.07568359375\tTop5 test acc: 97.76611328125\n", + "Epoch 15\tTop1 Train accuracy 67.265625\tTop1 Test accuracy: 65.24658203125\tTop5 test acc: 97.81494140625\n", + "Epoch 16\tTop1 Train accuracy 67.48046875\tTop1 Test accuracy: 65.46630859375\tTop5 test acc: 97.8515625\n", + "Epoch 17\tTop1 Train accuracy 67.6171875\tTop1 Test accuracy: 65.71044921875\tTop5 test acc: 97.86376953125\n", + "Epoch 18\tTop1 Train accuracy 67.83203125\tTop1 Test accuracy: 65.966796875\tTop5 test acc: 97.8759765625\n", + "Epoch 19\tTop1 Train accuracy 68.0078125\tTop1 Test accuracy: 66.05224609375\tTop5 test acc: 97.88818359375\n", + "Epoch 20\tTop1 Train accuracy 68.1640625\tTop1 Test accuracy: 66.17431640625\tTop5 test acc: 97.88818359375\n", + "Epoch 21\tTop1 Train accuracy 68.37890625\tTop1 Test accuracy: 66.30859375\tTop5 test acc: 97.900390625\n", + 
"Epoch 22\tTop1 Train accuracy 68.49609375\tTop1 Test accuracy: 66.50390625\tTop5 test acc: 97.88818359375\n", + "Epoch 23\tTop1 Train accuracy 68.75\tTop1 Test accuracy: 66.6259765625\tTop5 test acc: 97.91259765625\n", + "Epoch 24\tTop1 Train accuracy 68.90625\tTop1 Test accuracy: 66.68701171875\tTop5 test acc: 97.96142578125\n", + "Epoch 25\tTop1 Train accuracy 68.984375\tTop1 Test accuracy: 66.8212890625\tTop5 test acc: 97.998046875\n", + "Epoch 26\tTop1 Train accuracy 69.39453125\tTop1 Test accuracy: 66.9677734375\tTop5 test acc: 98.0224609375\n", + "Epoch 27\tTop1 Train accuracy 69.4921875\tTop1 Test accuracy: 67.1142578125\tTop5 test acc: 98.01025390625\n", + "Epoch 28\tTop1 Train accuracy 69.6484375\tTop1 Test accuracy: 67.1630859375\tTop5 test acc: 98.0224609375\n", + "Epoch 29\tTop1 Train accuracy 69.7265625\tTop1 Test accuracy: 67.19970703125\tTop5 test acc: 98.03466796875\n", + "Epoch 30\tTop1 Train accuracy 69.74609375\tTop1 Test accuracy: 67.24853515625\tTop5 test acc: 98.05908203125\n", + "Epoch 31\tTop1 Train accuracy 69.921875\tTop1 Test accuracy: 67.37060546875\tTop5 test acc: 98.03466796875\n", + "Epoch 32\tTop1 Train accuracy 70.078125\tTop1 Test accuracy: 67.46826171875\tTop5 test acc: 98.03466796875\n", + "Epoch 33\tTop1 Train accuracy 70.25390625\tTop1 Test accuracy: 67.5048828125\tTop5 test acc: 98.0712890625\n", + "Epoch 34\tTop1 Train accuracy 70.33203125\tTop1 Test accuracy: 67.59033203125\tTop5 test acc: 98.095703125\n", + "Epoch 35\tTop1 Train accuracy 70.48828125\tTop1 Test accuracy: 67.73681640625\tTop5 test acc: 98.13232421875\n", + "Epoch 36\tTop1 Train accuracy 70.5859375\tTop1 Test accuracy: 67.83447265625\tTop5 test acc: 98.1201171875\n", + "Epoch 37\tTop1 Train accuracy 70.625\tTop1 Test accuracy: 67.85888671875\tTop5 test acc: 98.13232421875\n", + "Epoch 38\tTop1 Train accuracy 70.78125\tTop1 Test accuracy: 67.88330078125\tTop5 test acc: 98.13232421875\n", + "Epoch 39\tTop1 Train accuracy 70.91796875\tTop1 Test accuracy: 67.919921875\tTop5 test acc: 98.10791015625\n", + "Epoch 40\tTop1 Train accuracy 70.95703125\tTop1 Test accuracy: 67.95654296875\tTop5 test acc: 98.10791015625\n", + "Epoch 41\tTop1 Train accuracy 71.03515625\tTop1 Test accuracy: 68.00537109375\tTop5 test acc: 98.1201171875\n", + "Epoch 42\tTop1 Train accuracy 71.07421875\tTop1 Test accuracy: 68.06640625\tTop5 test acc: 98.15673828125\n", + "Epoch 43\tTop1 Train accuracy 71.15234375\tTop1 Test accuracy: 68.12744140625\tTop5 test acc: 98.15673828125\n", + "Epoch 44\tTop1 Train accuracy 71.2109375\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.1689453125\n", + "Epoch 45\tTop1 Train accuracy 71.25\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.1689453125\n", + "Epoch 46\tTop1 Train accuracy 71.46484375\tTop1 Test accuracy: 68.15185546875\tTop5 test acc: 98.193359375\n", + "Epoch 47\tTop1 Train accuracy 71.58203125\tTop1 Test accuracy: 68.22509765625\tTop5 test acc: 98.2177734375\n", + "Epoch 48\tTop1 Train accuracy 71.6796875\tTop1 Test accuracy: 68.27392578125\tTop5 test acc: 98.22998046875\n", + "Epoch 49\tTop1 Train accuracy 71.8359375\tTop1 Test accuracy: 68.3349609375\tTop5 test acc: 98.22998046875\n", + "Epoch 50\tTop1 Train accuracy 71.93359375\tTop1 Test accuracy: 68.44482421875\tTop5 test acc: 98.2421875\n", + "Epoch 51\tTop1 Train accuracy 72.01171875\tTop1 Test accuracy: 68.4814453125\tTop5 test acc: 98.2177734375\n", + "Epoch 52\tTop1 Train accuracy 72.0703125\tTop1 Test accuracy: 68.505859375\tTop5 test acc: 98.2177734375\n", + "Epoch 53\tTop1 Train accuracy 
72.2265625\tTop1 Test accuracy: 68.54248046875\tTop5 test acc: 98.22998046875\n", + "Epoch 54\tTop1 Train accuracy 72.24609375\tTop1 Test accuracy: 68.5791015625\tTop5 test acc: 98.22998046875\n", + "Epoch 55\tTop1 Train accuracy 72.34375\tTop1 Test accuracy: 68.65234375\tTop5 test acc: 98.25439453125\n", + "Epoch 56\tTop1 Train accuracy 72.421875\tTop1 Test accuracy: 68.71337890625\tTop5 test acc: 98.3154296875\n", + "Epoch 57\tTop1 Train accuracy 72.51953125\tTop1 Test accuracy: 68.71337890625\tTop5 test acc: 98.3154296875\n", + "Epoch 58\tTop1 Train accuracy 72.94921875\tTop1 Test accuracy: 68.76220703125\tTop5 test acc: 98.3154296875\n", + "Epoch 59\tTop1 Train accuracy 72.98828125\tTop1 Test accuracy: 68.83544921875\tTop5 test acc: 98.3154296875\n", + "Epoch 60\tTop1 Train accuracy 73.0859375\tTop1 Test accuracy: 68.88427734375\tTop5 test acc: 98.30322265625\n", + "Epoch 61\tTop1 Train accuracy 73.18359375\tTop1 Test accuracy: 68.896484375\tTop5 test acc: 98.32763671875\n", + "Epoch 62\tTop1 Train accuracy 73.3984375\tTop1 Test accuracy: 68.88427734375\tTop5 test acc: 98.33984375\n", + "Epoch 63\tTop1 Train accuracy 73.4375\tTop1 Test accuracy: 68.95751953125\tTop5 test acc: 98.33984375\n", + "Epoch 64\tTop1 Train accuracy 73.515625\tTop1 Test accuracy: 68.994140625\tTop5 test acc: 98.32763671875\n", + "Epoch 65\tTop1 Train accuracy 73.57421875\tTop1 Test accuracy: 68.9697265625\tTop5 test acc: 98.3154296875\n", + "Epoch 66\tTop1 Train accuracy 73.61328125\tTop1 Test accuracy: 69.03076171875\tTop5 test acc: 98.32763671875\n", + "Epoch 67\tTop1 Train accuracy 73.671875\tTop1 Test accuracy: 69.07958984375\tTop5 test acc: 98.3154296875\n", + "Epoch 68\tTop1 Train accuracy 73.7109375\tTop1 Test accuracy: 69.12841796875\tTop5 test acc: 98.3154296875\n", + "Epoch 69\tTop1 Train accuracy 73.8671875\tTop1 Test accuracy: 69.20166015625\tTop5 test acc: 98.3154296875\n", + "Epoch 70\tTop1 Train accuracy 73.984375\tTop1 Test accuracy: 69.25048828125\tTop5 test acc: 98.33984375\n", + "Epoch 71\tTop1 Train accuracy 74.00390625\tTop1 Test accuracy: 69.2626953125\tTop5 test acc: 98.35205078125\n", + "Epoch 72\tTop1 Train accuracy 74.00390625\tTop1 Test accuracy: 69.3115234375\tTop5 test acc: 98.33984375\n", + "Epoch 73\tTop1 Train accuracy 74.0234375\tTop1 Test accuracy: 69.34814453125\tTop5 test acc: 98.35205078125\n", + "Epoch 74\tTop1 Train accuracy 74.140625\tTop1 Test accuracy: 69.37255859375\tTop5 test acc: 98.33984375\n", + "Epoch 75\tTop1 Train accuracy 74.23828125\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.35205078125\n", + "Epoch 76\tTop1 Train accuracy 74.31640625\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.37646484375\n", + "Epoch 77\tTop1 Train accuracy 74.43359375\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.3642578125\n", + "Epoch 78\tTop1 Train accuracy 74.55078125\tTop1 Test accuracy: 69.3603515625\tTop5 test acc: 98.3642578125\n", + "Epoch 79\tTop1 Train accuracy 74.58984375\tTop1 Test accuracy: 69.37255859375\tTop5 test acc: 98.3642578125\n", + "Epoch 80\tTop1 Train accuracy 74.609375\tTop1 Test accuracy: 69.42138671875\tTop5 test acc: 98.3642578125\n", + "Epoch 81\tTop1 Train accuracy 74.6484375\tTop1 Test accuracy: 69.49462890625\tTop5 test acc: 98.3642578125\n", + "Epoch 82\tTop1 Train accuracy 74.6875\tTop1 Test accuracy: 69.47021484375\tTop5 test acc: 98.35205078125\n", + "Epoch 83\tTop1 Train accuracy 74.7265625\tTop1 Test accuracy: 69.5556640625\tTop5 test acc: 98.35205078125\n", + "Epoch 84\tTop1 Train accuracy 74.78515625\tTop1 Test accuracy: 
69.59228515625\tTop5 test acc: 98.35205078125\n", + "Epoch 85\tTop1 Train accuracy 74.8828125\tTop1 Test accuracy: 69.6533203125\tTop5 test acc: 98.35205078125\n", + "Epoch 86\tTop1 Train accuracy 74.94140625\tTop1 Test accuracy: 69.677734375\tTop5 test acc: 98.3642578125\n", + "Epoch 87\tTop1 Train accuracy 75.0390625\tTop1 Test accuracy: 69.7509765625\tTop5 test acc: 98.35205078125\n", + "Epoch 88\tTop1 Train accuracy 75.0390625\tTop1 Test accuracy: 69.71435546875\tTop5 test acc: 98.35205078125\n", + "Epoch 89\tTop1 Train accuracy 75.1171875\tTop1 Test accuracy: 69.775390625\tTop5 test acc: 98.33984375\n", + "Epoch 90\tTop1 Train accuracy 75.21484375\tTop1 Test accuracy: 69.7509765625\tTop5 test acc: 98.33984375\n", + "Epoch 91\tTop1 Train accuracy 75.25390625\tTop1 Test accuracy: 69.82421875\tTop5 test acc: 98.32763671875\n", + "Epoch 92\tTop1 Train accuracy 75.29296875\tTop1 Test accuracy: 69.86083984375\tTop5 test acc: 98.33984375\n", + "Epoch 93\tTop1 Train accuracy 75.33203125\tTop1 Test accuracy: 69.88525390625\tTop5 test acc: 98.35205078125\n", + "Epoch 94\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.81201171875\tTop5 test acc: 98.3642578125\n", + "Epoch 95\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.83642578125\tTop5 test acc: 98.37646484375\n", + "Epoch 96\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.83642578125\tTop5 test acc: 98.37646484375\n", + "Epoch 97\tTop1 Train accuracy 75.41015625\tTop1 Test accuracy: 69.86083984375\tTop5 test acc: 98.37646484375\n", + "Epoch 98\tTop1 Train accuracy 75.41015625\tTop1 Test accuracy: 69.90966796875\tTop5 test acc: 98.37646484375\n", + "Epoch 99\tTop1 Train accuracy 75.46875\tTop1 Test accuracy: 69.921875\tTop5 test acc: 98.37646484375\n" ], "name": "stdout" } @@ -835,8 +588,8 @@ "source": [ "" ], - "execution_count": null, + "execution_count": 18, "outputs": [] } ] -} +} \ No newline at end of file From 63e46b0d0e711a74e731410767e1e4ab62c0d0a5 Mon Sep 17 00:00:00 2001 From: Thalles Silva Date: Sun, 17 Jan 2021 20:28:37 -0300 Subject: [PATCH 3/4] Created using Colaboratory --- ..._batch_logistic_regression_evaluator.ipynb | 27 +++++++++---------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb index f64e920..faa96aa 100644 --- a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb +++ b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb @@ -24,10 +24,7 @@ "provenance": [], "include_colab_link": true }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": {} - } + "accelerator": "GPU" }, "cells": [ { @@ -228,7 +225,7 @@ "id": "BfIPl0G6_RrT" }, "source": [ - "def get_stl10_data_loaders(download, shuffle=False, batch_size=128):\n", + "def get_stl10_data_loaders(download, shuffle=False, batch_size=256):\n", " train_dataset = datasets.STL10('./data', split='train', download=download,\n", " transform=transforms.ToTensor())\n", "\n", @@ -242,7 +239,7 @@ " num_workers=10, drop_last=False, shuffle=shuffle)\n", " return train_loader, test_loader\n", "\n", - "def get_cifar10_data_loaders(download, shuffle=False, batch_size=128):\n", + "def get_cifar10_data_loaders(download, shuffle=False, batch_size=256):\n", " train_dataset = datasets.CIFAR10('./data', train=True, download=download,\n", " transform=transforms.ToTensor())\n", "\n", @@ -282,7 +279,7 @@ "elif config.arch == 'resnet50':\n", " model = 
torchvision.models.resnet50(pretrained=False, num_classes=10).to(device)" ], - "execution_count": 11, + "execution_count": null, "outputs": [] }, { @@ -302,7 +299,7 @@ " state_dict[k[len(\"backbone.\"):]] = state_dict[k]\n", " del state_dict[k]" ], - "execution_count": 12, + "execution_count": null, "outputs": [] }, { @@ -314,7 +311,7 @@ "log = model.load_state_dict(state_dict, strict=False)\n", "assert log.missing_keys == ['fc.weight', 'fc.bias']" ], - "execution_count": 13, + "execution_count": null, "outputs": [] }, { @@ -337,7 +334,7 @@ " train_loader, test_loader = get_stl10_data_loaders(download=True)\n", "print(\"Dataset:\", config.dataset_name)" ], - "execution_count": 14, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -387,7 +384,7 @@ "parameters = list(filter(lambda p: p.requires_grad, model.parameters()))\n", "assert len(parameters) == 2 # fc.weight, fc.bias" ], - "execution_count": 15, + "execution_count": null, "outputs": [] }, { @@ -399,7 +396,7 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0008)\n", "criterion = torch.nn.CrossEntropyLoss().to(device)" ], - "execution_count": 16, + "execution_count": null, "outputs": [] }, { @@ -424,7 +421,7 @@ " res.append(correct_k.mul_(100.0 / batch_size))\n", " return res" ], - "execution_count": 17, + "execution_count": null, "outputs": [] }, { @@ -470,7 +467,7 @@ " top5_accuracy /= (counter + 1)\n", " print(f\"Epoch {epoch}\\tTop1 Train accuracy {top1_train_accuracy.item()}\\tTop1 Test accuracy: {top1_accuracy.item()}\\tTop5 test acc: {top5_accuracy.item()}\")" ], - "execution_count": 18, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -588,7 +585,7 @@ "source": [ "" ], - "execution_count": 18, + "execution_count": null, "outputs": [] } ] From 2cc1e5d96949fa703d94b923a05c9fcaf483e32b Mon Sep 17 00:00:00 2001 From: Thalles Silva Date: Sun, 17 Jan 2021 21:12:17 -0300 Subject: [PATCH 4/4] Created using Colaboratory --- ..._batch_logistic_regression_evaluator.ipynb | 567 ++++++++++++------ 1 file changed, 398 insertions(+), 169 deletions(-) diff --git a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb index faa96aa..809368f 100644 --- a/feature_eval/mini_batch_logistic_regression_evaluator.ipynb +++ b/feature_eval/mini_batch_logistic_regression_evaluator.ipynb @@ -24,7 +24,256 @@ "provenance": [], "include_colab_link": true }, - "accelerator": "GPU" + "accelerator": "GPU", + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "1b97f76ec8314fe3985e9183af3fdd9b": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HBoxModel", + "state": { + "_view_name": "HBoxView", + "_dom_classes": [], + "_model_name": "HBoxModel", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.5.0", + "box_style": "", + "layout": "IPY_MODEL_1d516174fefa4c26a1d9232a9fc7e34b", + "_model_module": "@jupyter-widgets/controls", + "children": [ + "IPY_MODEL_f72a8a93cdd14fa4bfdc34fbf1061f1e", + "IPY_MODEL_8a684a8419754a86b7b70b9d26b252a4" + ] + } + }, + "1d516174fefa4c26a1d9232a9fc7e34b": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + 
"width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "f72a8a93cdd14fa4bfdc34fbf1061f1e": { + "model_module": "@jupyter-widgets/controls", + "model_name": "FloatProgressModel", + "state": { + "_view_name": "ProgressView", + "style": "IPY_MODEL_1a4df18ac4034be1acc4b8ef56527fd1", + "_dom_classes": [], + "description": "", + "_model_name": "FloatProgressModel", + "bar_style": "info", + "max": 1, + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": 1, + "_view_count": null, + "_view_module_version": "1.5.0", + "orientation": "horizontal", + "min": 0, + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_89b38536b9da4cfdb914fd291aca0dfe" + } + }, + "8a684a8419754a86b7b70b9d26b252a4": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_77da6ecf9d63460ab420d41f28bb7f1d", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": " 170500096/? 
[00:20<00:00, 54507700.03it/s]", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_45b89ec6a3504560b9643422cee95213" + } + }, + "1a4df18ac4034be1acc4b8ef56527fd1": { + "model_module": "@jupyter-widgets/controls", + "model_name": "ProgressStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "ProgressStyleModel", + "description_width": "initial", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "bar_color": null, + "_model_module": "@jupyter-widgets/controls" + } + }, + "89b38536b9da4cfdb914fd291aca0dfe": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + }, + "77da6ecf9d63460ab420d41f28bb7f1d": { + "model_module": "@jupyter-widgets/controls", + "model_name": "DescriptionStyleModel", + "state": { + "_view_name": "StyleView", + "_model_name": "DescriptionStyleModel", + "description_width": "", + "_view_module": "@jupyter-widgets/base", + "_model_module_version": "1.5.0", + "_view_count": null, + "_view_module_version": "1.2.0", + "_model_module": "@jupyter-widgets/controls" + } + }, + "45b89ec6a3504560b9643422cee95213": { + "model_module": "@jupyter-widgets/base", + "model_name": "LayoutModel", + "state": { + "_view_name": "LayoutView", + "grid_template_rows": null, + "right": null, + "justify_content": null, + "_view_module": "@jupyter-widgets/base", + "overflow": null, + "_model_module_version": "1.2.0", + "_view_count": null, + "flex_flow": null, + "width": null, + "min_width": null, + "border": null, + "align_items": null, + "bottom": null, + "_model_module": "@jupyter-widgets/base", + "top": null, + "grid_column": null, + "overflow_y": null, + "overflow_x": null, + "grid_auto_flow": null, + "grid_area": null, + "grid_template_columns": null, + "flex": null, + "_model_name": "LayoutModel", + "justify_items": null, + "grid_row": null, + "max_height": null, + "align_content": null, + "visibility": null, + "align_self": null, + "height": null, + "min_height": null, + "padding": null, + "grid_auto_rows": null, + "grid_gap": null, + "max_width": null, + "order": null, + "_view_module_version": "1.2.0", + "grid_template_areas": null, + "object_position": null, + "object_fit": null, + "grid_auto_columns": null, + "margin": null, + "display": null, + "left": null + } + } + } + } }, 
"cells": [ { @@ -51,7 +300,7 @@ "import matplotlib.pyplot as plt\n", "import torchvision" ], - "execution_count": null, + "execution_count": 10, "outputs": [] }, { @@ -61,12 +310,12 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e44ac358-6480-4a5f-a358-6eb6ace26c8b" + "outputId": "a6477424-66e6-4a59-bef2-42e5cbada7cf" }, "source": [ "!pip install gdown" ], - "execution_count": null, + "execution_count": 11, "outputs": [ { "output_type": "stream", @@ -77,8 +326,8 @@ "Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gdown) (4.41.1)\n", "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (1.24.3)\n", "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (3.0.4)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2020.12.5)\n", - "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2.10)\n" + "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2.10)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2020.12.5)\n" ], "name": "stdout" } @@ -95,7 +344,7 @@ " 'resnet18_100-epochs_cifar10': '1lc2aoVtrAetGn0PnTkOyFzPCIucOJq7C'}\n", " return file_id.get(folder_name, \"Model not found.\")" ], - "execution_count": null, + "execution_count": 12, "outputs": [] }, { @@ -105,19 +354,19 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "36932a7d-c7e5-492a-f37d-8be6b18f787a" + "outputId": "da3bc13b-f989-4a19-dc02-5172e5e370c0" }, "source": [ - "folder_name = 'resnet18_100-epochs_stl10'\n", + "folder_name = 'resnet18_100-epochs_cifar10'\n", "file_id = get_file_id_by_model(folder_name)\n", "print(folder_name, file_id)" ], - "execution_count": null, + "execution_count": 13, "outputs": [ { "output_type": "stream", "text": [ - "resnet18_100-epochs_stl10 14_nH2FkyKbt61cieQDiSbBVNP8-gtwgF\n" + "resnet18_100-epochs_cifar10 1lc2aoVtrAetGn0PnTkOyFzPCIucOJq7C\n" ], "name": "stdout" } @@ -130,7 +379,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "8d52756d-707b-4a3f-9e8c-0d191408deab" + "outputId": "63d1d89d-ad11-48ba-8bb3-4da15b930073" }, "source": [ "# download and extract model files\n", @@ -138,45 +387,18 @@ "os.system('unzip {}'.format(folder_name))\n", "!ls" ], - "execution_count": null, + "execution_count": 14, "outputs": [ { "output_type": "stream", "text": [ "checkpoint_0100.pth.tar\n", "config.yml\n", - "events.out.tfevents.1610901470.4cb2c837708d.2683858.0\n", - "resnet18_100-epochs_stl10.zip\n", - "sample_data\n", - "training.log\n" - ], - "name": "stdout" - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ooyhd8piZ1w1", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "6ffb73aa-35c5-4df2-bd1f-6de6a235a9e5" - }, - "source": [ - "!unzip resnet18_100-epochs_stl10" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Archive: resnet18_100-epochs_stl10.zip\n", - "replace checkpoint_0100.pth.tar? 
[y]es, [n]o, [A]ll, [N]one, [r]ename: A\n", - " inflating: checkpoint_0100.pth.tar \n", - " inflating: config.yml \n", - " inflating: events.out.tfevents.1610901470.4cb2c837708d.2683858.0 \n", - " inflating: training.log \n" + "events.out.tfevents.1610901418.4cb2c837708d.2683796.0\n", + "resnet18_100-epochs_cifar10.zip\n", + "resnet18_100-epochs-cifar10.zip\n", + "run.log\n", + "sample_data\n" ], "name": "stdout" } @@ -192,7 +414,7 @@ "import torchvision.transforms as transforms\n", "from torchvision import datasets" ], - "execution_count": null, + "execution_count": 15, "outputs": [] }, { @@ -202,13 +424,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "5f58bd9b-4428-4b8c-e271-b47ca6694f34" + "outputId": "028ac120-c51d-4eb2-cf00-da69aed6e310" }, "source": [ "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", "print(\"Using device:\", device)" ], - "execution_count": null, + "execution_count": 16, "outputs": [ { "output_type": "stream", @@ -253,7 +475,7 @@ " num_workers=10, drop_last=False, shuffle=shuffle)\n", " return train_loader, test_loader" ], - "execution_count": null, + "execution_count": 17, "outputs": [] }, { @@ -265,7 +487,7 @@ "with open(os.path.join('./config.yml')) as file:\n", " config = yaml.load(file)" ], - "execution_count": null, + "execution_count": 18, "outputs": [] }, { @@ -279,7 +501,7 @@ "elif config.arch == 'resnet50':\n", " model = torchvision.models.resnet50(pretrained=False, num_classes=10).to(device)" ], - "execution_count": null, + "execution_count": 19, "outputs": [] }, { @@ -299,7 +521,7 @@ " state_dict[k[len(\"backbone.\"):]] = state_dict[k]\n", " del state_dict[k]" ], - "execution_count": null, + "execution_count": 20, "outputs": [] }, { @@ -311,7 +533,7 @@ "log = model.load_state_dict(state_dict, strict=False)\n", "assert log.missing_keys == ['fc.weight', 'fc.bias']" ], - "execution_count": null, + "execution_count": 21, "outputs": [] }, { @@ -320,12 +542,19 @@ "id": "_GC0a14uWRr6", "colab": { "base_uri": "https://localhost:8080/", - "height": 102, + "height": 117, "referenced_widgets": [ - "48ebf2f69d1f4f5a9208cd2923eb5eac" + "1b97f76ec8314fe3985e9183af3fdd9b", + "1d516174fefa4c26a1d9232a9fc7e34b", + "f72a8a93cdd14fa4bfdc34fbf1061f1e", + "8a684a8419754a86b7b70b9d26b252a4", + "1a4df18ac4034be1acc4b8ef56527fd1", + "89b38536b9da4cfdb914fd291aca0dfe", + "77da6ecf9d63460ab420d41f28bb7f1d", + "45b89ec6a3504560b9643422cee95213" ] }, - "outputId": "6c3b86ad-b568-4c68-c1fb-1f7b2abbb6aa" + "outputId": "4382995f-e0fa-48fc-d341-71400a06b6d9" }, "source": [ "if config.dataset_name == 'cifar10':\n", @@ -334,12 +563,12 @@ " train_loader, test_loader = get_stl10_data_loaders(download=True)\n", "print(\"Dataset:\", config.dataset_name)" ], - "execution_count": null, + "execution_count": 22, "outputs": [ { "output_type": "stream", "text": [ - "Downloading http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz to ./data/stl10_binary.tar.gz\n" + "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz\n" ], "name": "stdout" }, @@ -347,9 +576,9 @@ "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "48ebf2f69d1f4f5a9208cd2923eb5eac", - "version_major": 2, - "version_minor": 0 + "model_id": "1b97f76ec8314fe3985e9183af3fdd9b", + "version_minor": 0, + "version_major": 2 }, "text/plain": [ "HBox(children=(FloatProgress(value=1.0, bar_style='info', max=1.0), HTML(value='')))" @@ -362,9 +591,9 @@ { "output_type": "stream", "text": [ - "Extracting 
./data/stl10_binary.tar.gz to ./data\n", + "Extracting ./data/cifar-10-python.tar.gz to ./data\n", "Files already downloaded and verified\n", - "Dataset: stl10\n" + "Dataset: cifar10\n" ], "name": "stdout" } @@ -384,7 +613,7 @@ "parameters = list(filter(lambda p: p.requires_grad, model.parameters()))\n", "assert len(parameters) == 2 # fc.weight, fc.bias" ], - "execution_count": null, + "execution_count": 23, "outputs": [] }, { @@ -396,7 +625,7 @@ "optimizer = torch.optim.Adam(model.parameters(), lr=3e-4, weight_decay=0.0008)\n", "criterion = torch.nn.CrossEntropyLoss().to(device)" ], - "execution_count": null, + "execution_count": 24, "outputs": [] }, { @@ -421,7 +650,7 @@ " res.append(correct_k.mul_(100.0 / batch_size))\n", " return res" ], - "execution_count": null, + "execution_count": 25, "outputs": [] }, { @@ -431,7 +660,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "d6127d8e-836f-4e69-a344-fee7e836d63a" + "outputId": "48816318-655c-4c2d-b4fa-4549316a8477" }, "source": [ "epochs = 100\n", @@ -467,111 +696,111 @@ " top5_accuracy /= (counter + 1)\n", " print(f\"Epoch {epoch}\\tTop1 Train accuracy {top1_train_accuracy.item()}\\tTop1 Test accuracy: {top1_accuracy.item()}\\tTop5 test acc: {top5_accuracy.item()}\")" ], - "execution_count": null, + "execution_count": 26, "outputs": [ { "output_type": "stream", "text": [ - "Epoch 0\tTop1 Train accuracy 27.890625\tTop1 Test accuracy: 42.05322265625\tTop5 test acc: 93.29833984375\n", - "Epoch 1\tTop1 Train accuracy 49.921875\tTop1 Test accuracy: 54.45556640625\tTop5 test acc: 96.1181640625\n", - "Epoch 2\tTop1 Train accuracy 57.3828125\tTop1 Test accuracy: 58.9599609375\tTop5 test acc: 96.9482421875\n", - "Epoch 3\tTop1 Train accuracy 60.01953125\tTop1 Test accuracy: 60.38818359375\tTop5 test acc: 97.03369140625\n", - "Epoch 4\tTop1 Train accuracy 61.7578125\tTop1 Test accuracy: 61.572265625\tTop5 test acc: 97.1923828125\n", - "Epoch 5\tTop1 Train accuracy 62.91015625\tTop1 Test accuracy: 62.21923828125\tTop5 test acc: 97.30224609375\n", - "Epoch 6\tTop1 Train accuracy 63.57421875\tTop1 Test accuracy: 62.6220703125\tTop5 test acc: 97.4365234375\n", - "Epoch 7\tTop1 Train accuracy 64.12109375\tTop1 Test accuracy: 63.18359375\tTop5 test acc: 97.55859375\n", - "Epoch 8\tTop1 Train accuracy 64.82421875\tTop1 Test accuracy: 63.51318359375\tTop5 test acc: 97.57080078125\n", - "Epoch 9\tTop1 Train accuracy 65.17578125\tTop1 Test accuracy: 63.80615234375\tTop5 test acc: 97.59521484375\n", - "Epoch 10\tTop1 Train accuracy 65.5859375\tTop1 Test accuracy: 64.14794921875\tTop5 test acc: 97.6318359375\n", - "Epoch 11\tTop1 Train accuracy 65.80078125\tTop1 Test accuracy: 64.51416015625\tTop5 test acc: 97.61962890625\n", - "Epoch 12\tTop1 Train accuracy 66.03515625\tTop1 Test accuracy: 64.70947265625\tTop5 test acc: 97.69287109375\n", - "Epoch 13\tTop1 Train accuracy 66.42578125\tTop1 Test accuracy: 64.88037109375\tTop5 test acc: 97.705078125\n", - "Epoch 14\tTop1 Train accuracy 66.9140625\tTop1 Test accuracy: 65.07568359375\tTop5 test acc: 97.76611328125\n", - "Epoch 15\tTop1 Train accuracy 67.265625\tTop1 Test accuracy: 65.24658203125\tTop5 test acc: 97.81494140625\n", - "Epoch 16\tTop1 Train accuracy 67.48046875\tTop1 Test accuracy: 65.46630859375\tTop5 test acc: 97.8515625\n", - "Epoch 17\tTop1 Train accuracy 67.6171875\tTop1 Test accuracy: 65.71044921875\tTop5 test acc: 97.86376953125\n", - "Epoch 18\tTop1 Train accuracy 67.83203125\tTop1 Test accuracy: 65.966796875\tTop5 test acc: 97.8759765625\n", - "Epoch 19\tTop1 Train 
accuracy 68.0078125\tTop1 Test accuracy: 66.05224609375\tTop5 test acc: 97.88818359375\n",
- "Epoch 20\tTop1 Train accuracy 68.1640625\tTop1 Test accuracy: 66.17431640625\tTop5 test acc: 97.88818359375\n",
- "Epoch 21\tTop1 Train accuracy 68.37890625\tTop1 Test accuracy: 66.30859375\tTop5 test acc: 97.900390625\n",
- "Epoch 22\tTop1 Train accuracy 68.49609375\tTop1 Test accuracy: 66.50390625\tTop5 test acc: 97.88818359375\n",
- "Epoch 23\tTop1 Train accuracy 68.75\tTop1 Test accuracy: 66.6259765625\tTop5 test acc: 97.91259765625\n",
- "Epoch 24\tTop1 Train accuracy 68.90625\tTop1 Test accuracy: 66.68701171875\tTop5 test acc: 97.96142578125\n",
- "Epoch 25\tTop1 Train accuracy 68.984375\tTop1 Test accuracy: 66.8212890625\tTop5 test acc: 97.998046875\n",
- "Epoch 26\tTop1 Train accuracy 69.39453125\tTop1 Test accuracy: 66.9677734375\tTop5 test acc: 98.0224609375\n",
- "Epoch 27\tTop1 Train accuracy 69.4921875\tTop1 Test accuracy: 67.1142578125\tTop5 test acc: 98.01025390625\n",
- "Epoch 28\tTop1 Train accuracy 69.6484375\tTop1 Test accuracy: 67.1630859375\tTop5 test acc: 98.0224609375\n",
- "Epoch 29\tTop1 Train accuracy 69.7265625\tTop1 Test accuracy: 67.19970703125\tTop5 test acc: 98.03466796875\n",
- "Epoch 30\tTop1 Train accuracy 69.74609375\tTop1 Test accuracy: 67.24853515625\tTop5 test acc: 98.05908203125\n",
- "Epoch 31\tTop1 Train accuracy 69.921875\tTop1 Test accuracy: 67.37060546875\tTop5 test acc: 98.03466796875\n",
- "Epoch 32\tTop1 Train accuracy 70.078125\tTop1 Test accuracy: 67.46826171875\tTop5 test acc: 98.03466796875\n",
- "Epoch 33\tTop1 Train accuracy 70.25390625\tTop1 Test accuracy: 67.5048828125\tTop5 test acc: 98.0712890625\n",
- "Epoch 34\tTop1 Train accuracy 70.33203125\tTop1 Test accuracy: 67.59033203125\tTop5 test acc: 98.095703125\n",
- "Epoch 35\tTop1 Train accuracy 70.48828125\tTop1 Test accuracy: 67.73681640625\tTop5 test acc: 98.13232421875\n",
- "Epoch 36\tTop1 Train accuracy 70.5859375\tTop1 Test accuracy: 67.83447265625\tTop5 test acc: 98.1201171875\n",
- "Epoch 37\tTop1 Train accuracy 70.625\tTop1 Test accuracy: 67.85888671875\tTop5 test acc: 98.13232421875\n",
- "Epoch 38\tTop1 Train accuracy 70.78125\tTop1 Test accuracy: 67.88330078125\tTop5 test acc: 98.13232421875\n",
- "Epoch 39\tTop1 Train accuracy 70.91796875\tTop1 Test accuracy: 67.919921875\tTop5 test acc: 98.10791015625\n",
- "Epoch 40\tTop1 Train accuracy 70.95703125\tTop1 Test accuracy: 67.95654296875\tTop5 test acc: 98.10791015625\n",
- "Epoch 41\tTop1 Train accuracy 71.03515625\tTop1 Test accuracy: 68.00537109375\tTop5 test acc: 98.1201171875\n",
- "Epoch 42\tTop1 Train accuracy 71.07421875\tTop1 Test accuracy: 68.06640625\tTop5 test acc: 98.15673828125\n",
- "Epoch 43\tTop1 Train accuracy 71.15234375\tTop1 Test accuracy: 68.12744140625\tTop5 test acc: 98.15673828125\n",
- "Epoch 44\tTop1 Train accuracy 71.2109375\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.1689453125\n",
- "Epoch 45\tTop1 Train accuracy 71.25\tTop1 Test accuracy: 68.1396484375\tTop5 test acc: 98.1689453125\n",
- "Epoch 46\tTop1 Train accuracy 71.46484375\tTop1 Test accuracy: 68.15185546875\tTop5 test acc: 98.193359375\n",
- "Epoch 47\tTop1 Train accuracy 71.58203125\tTop1 Test accuracy: 68.22509765625\tTop5 test acc: 98.2177734375\n",
- "Epoch 48\tTop1 Train accuracy 71.6796875\tTop1 Test accuracy: 68.27392578125\tTop5 test acc: 98.22998046875\n",
- "Epoch 49\tTop1 Train accuracy 71.8359375\tTop1 Test accuracy: 68.3349609375\tTop5 test acc: 98.22998046875\n",
- "Epoch 50\tTop1 Train accuracy 71.93359375\tTop1 Test accuracy: 68.44482421875\tTop5 test acc: 98.2421875\n",
- "Epoch 51\tTop1 Train accuracy 72.01171875\tTop1 Test accuracy: 68.4814453125\tTop5 test acc: 98.2177734375\n",
- "Epoch 52\tTop1 Train accuracy 72.0703125\tTop1 Test accuracy: 68.505859375\tTop5 test acc: 98.2177734375\n",
- "Epoch 53\tTop1 Train accuracy 72.2265625\tTop1 Test accuracy: 68.54248046875\tTop5 test acc: 98.22998046875\n",
- "Epoch 54\tTop1 Train accuracy 72.24609375\tTop1 Test accuracy: 68.5791015625\tTop5 test acc: 98.22998046875\n",
- "Epoch 55\tTop1 Train accuracy 72.34375\tTop1 Test accuracy: 68.65234375\tTop5 test acc: 98.25439453125\n",
- "Epoch 56\tTop1 Train accuracy 72.421875\tTop1 Test accuracy: 68.71337890625\tTop5 test acc: 98.3154296875\n",
- "Epoch 57\tTop1 Train accuracy 72.51953125\tTop1 Test accuracy: 68.71337890625\tTop5 test acc: 98.3154296875\n",
- "Epoch 58\tTop1 Train accuracy 72.94921875\tTop1 Test accuracy: 68.76220703125\tTop5 test acc: 98.3154296875\n",
- "Epoch 59\tTop1 Train accuracy 72.98828125\tTop1 Test accuracy: 68.83544921875\tTop5 test acc: 98.3154296875\n",
- "Epoch 60\tTop1 Train accuracy 73.0859375\tTop1 Test accuracy: 68.88427734375\tTop5 test acc: 98.30322265625\n",
- "Epoch 61\tTop1 Train accuracy 73.18359375\tTop1 Test accuracy: 68.896484375\tTop5 test acc: 98.32763671875\n",
- "Epoch 62\tTop1 Train accuracy 73.3984375\tTop1 Test accuracy: 68.88427734375\tTop5 test acc: 98.33984375\n",
- "Epoch 63\tTop1 Train accuracy 73.4375\tTop1 Test accuracy: 68.95751953125\tTop5 test acc: 98.33984375\n",
- "Epoch 64\tTop1 Train accuracy 73.515625\tTop1 Test accuracy: 68.994140625\tTop5 test acc: 98.32763671875\n",
- "Epoch 65\tTop1 Train accuracy 73.57421875\tTop1 Test accuracy: 68.9697265625\tTop5 test acc: 98.3154296875\n",
- "Epoch 66\tTop1 Train accuracy 73.61328125\tTop1 Test accuracy: 69.03076171875\tTop5 test acc: 98.32763671875\n",
- "Epoch 67\tTop1 Train accuracy 73.671875\tTop1 Test accuracy: 69.07958984375\tTop5 test acc: 98.3154296875\n",
- "Epoch 68\tTop1 Train accuracy 73.7109375\tTop1 Test accuracy: 69.12841796875\tTop5 test acc: 98.3154296875\n",
- "Epoch 69\tTop1 Train accuracy 73.8671875\tTop1 Test accuracy: 69.20166015625\tTop5 test acc: 98.3154296875\n",
- "Epoch 70\tTop1 Train accuracy 73.984375\tTop1 Test accuracy: 69.25048828125\tTop5 test acc: 98.33984375\n",
- "Epoch 71\tTop1 Train accuracy 74.00390625\tTop1 Test accuracy: 69.2626953125\tTop5 test acc: 98.35205078125\n",
- "Epoch 72\tTop1 Train accuracy 74.00390625\tTop1 Test accuracy: 69.3115234375\tTop5 test acc: 98.33984375\n",
- "Epoch 73\tTop1 Train accuracy 74.0234375\tTop1 Test accuracy: 69.34814453125\tTop5 test acc: 98.35205078125\n",
- "Epoch 74\tTop1 Train accuracy 74.140625\tTop1 Test accuracy: 69.37255859375\tTop5 test acc: 98.33984375\n",
- "Epoch 75\tTop1 Train accuracy 74.23828125\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.35205078125\n",
- "Epoch 76\tTop1 Train accuracy 74.31640625\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.37646484375\n",
- "Epoch 77\tTop1 Train accuracy 74.43359375\tTop1 Test accuracy: 69.4091796875\tTop5 test acc: 98.3642578125\n",
- "Epoch 78\tTop1 Train accuracy 74.55078125\tTop1 Test accuracy: 69.3603515625\tTop5 test acc: 98.3642578125\n",
- "Epoch 79\tTop1 Train accuracy 74.58984375\tTop1 Test accuracy: 69.37255859375\tTop5 test acc: 98.3642578125\n",
- "Epoch 80\tTop1 Train accuracy 74.609375\tTop1 Test accuracy: 69.42138671875\tTop5 test acc: 98.3642578125\n",
- "Epoch 81\tTop1 Train accuracy 74.6484375\tTop1 Test accuracy: 69.49462890625\tTop5 test acc: 98.3642578125\n",
- "Epoch 82\tTop1 Train accuracy 74.6875\tTop1 Test accuracy: 69.47021484375\tTop5 test acc: 98.35205078125\n",
- "Epoch 83\tTop1 Train accuracy 74.7265625\tTop1 Test accuracy: 69.5556640625\tTop5 test acc: 98.35205078125\n",
- "Epoch 84\tTop1 Train accuracy 74.78515625\tTop1 Test accuracy: 69.59228515625\tTop5 test acc: 98.35205078125\n",
- "Epoch 85\tTop1 Train accuracy 74.8828125\tTop1 Test accuracy: 69.6533203125\tTop5 test acc: 98.35205078125\n",
- "Epoch 86\tTop1 Train accuracy 74.94140625\tTop1 Test accuracy: 69.677734375\tTop5 test acc: 98.3642578125\n",
- "Epoch 87\tTop1 Train accuracy 75.0390625\tTop1 Test accuracy: 69.7509765625\tTop5 test acc: 98.35205078125\n",
- "Epoch 88\tTop1 Train accuracy 75.0390625\tTop1 Test accuracy: 69.71435546875\tTop5 test acc: 98.35205078125\n",
- "Epoch 89\tTop1 Train accuracy 75.1171875\tTop1 Test accuracy: 69.775390625\tTop5 test acc: 98.33984375\n",
- "Epoch 90\tTop1 Train accuracy 75.21484375\tTop1 Test accuracy: 69.7509765625\tTop5 test acc: 98.33984375\n",
- "Epoch 91\tTop1 Train accuracy 75.25390625\tTop1 Test accuracy: 69.82421875\tTop5 test acc: 98.32763671875\n",
- "Epoch 92\tTop1 Train accuracy 75.29296875\tTop1 Test accuracy: 69.86083984375\tTop5 test acc: 98.33984375\n",
- "Epoch 93\tTop1 Train accuracy 75.33203125\tTop1 Test accuracy: 69.88525390625\tTop5 test acc: 98.35205078125\n",
- "Epoch 94\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.81201171875\tTop5 test acc: 98.3642578125\n",
- "Epoch 95\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.83642578125\tTop5 test acc: 98.37646484375\n",
- "Epoch 96\tTop1 Train accuracy 75.37109375\tTop1 Test accuracy: 69.83642578125\tTop5 test acc: 98.37646484375\n",
- "Epoch 97\tTop1 Train accuracy 75.41015625\tTop1 Test accuracy: 69.86083984375\tTop5 test acc: 98.37646484375\n",
- "Epoch 98\tTop1 Train accuracy 75.41015625\tTop1 Test accuracy: 69.90966796875\tTop5 test acc: 98.37646484375\n",
- "Epoch 99\tTop1 Train accuracy 75.46875\tTop1 Test accuracy: 69.921875\tTop5 test acc: 98.37646484375\n"
+ "Epoch 0\tTop1 Train accuracy 49.823020935058594\tTop1 Test accuracy: 57.63786697387695\tTop5 test acc: 94.96036529541016\n",
+ "Epoch 1\tTop1 Train accuracy 59.0130729675293\tTop1 Test accuracy: 59.57088851928711\tTop5 test acc: 95.76114654541016\n",
+ "Epoch 2\tTop1 Train accuracy 60.604671478271484\tTop1 Test accuracy: 60.32686233520508\tTop5 test acc: 96.07250213623047\n",
+ "Epoch 3\tTop1 Train accuracy 61.547752380371094\tTop1 Test accuracy: 61.19715118408203\tTop5 test acc: 96.14946746826172\n",
+ "Epoch 4\tTop1 Train accuracy 62.19586944580078\tTop1 Test accuracy: 61.48035430908203\tTop5 test acc: 96.37407684326172\n",
+ "Epoch 5\tTop1 Train accuracy 62.677772521972656\tTop1 Test accuracy: 61.784236907958984\tTop5 test acc: 96.40337371826172\n",
+ "Epoch 6\tTop1 Train accuracy 63.06640625\tTop1 Test accuracy: 62.2346076965332\tTop5 test acc: 96.50102996826172\n",
+ "Epoch 7\tTop1 Train accuracy 63.40122604370117\tTop1 Test accuracy: 62.52527618408203\tTop5 test acc: 96.46196746826172\n",
+ "Epoch 8\tTop1 Train accuracy 63.698577880859375\tTop1 Test accuracy: 62.83777618408203\tTop5 test acc: 96.54009246826172\n",
+ "Epoch 9\tTop1 Train accuracy 63.90983581542969\tTop1 Test accuracy: 63.118682861328125\tTop5 test acc: 96.58892059326172\n",
+ "Epoch 10\tTop1 Train accuracy 64.14102172851562\tTop1 Test accuracy: 63.20772171020508\tTop5 test acc: 96.68657684326172\n",
+ "Epoch 11\tTop1 Train accuracy 64.33633422851562\tTop1 Test accuracy: 63.469093322753906\tTop5 test acc: 96.75609588623047\n",
+ "Epoch 12\tTop1 Train accuracy 64.5057373046875\tTop1 Test accuracy: 63.556983947753906\tTop5 test acc: 96.71703338623047\n",
+ "Epoch 13\tTop1 Train accuracy 64.6436538696289\tTop1 Test accuracy: 63.66325759887695\tTop5 test acc: 96.69750213623047\n",
+ "Epoch 14\tTop1 Train accuracy 64.75326538085938\tTop1 Test accuracy: 63.62419509887695\tTop5 test acc: 96.68773651123047\n",
+ "Epoch 15\tTop1 Train accuracy 64.87284851074219\tTop1 Test accuracy: 63.84650802612305\tTop5 test acc: 96.66820526123047\n",
+ "Epoch 16\tTop1 Train accuracy 64.97688293457031\tTop1 Test accuracy: 64.00276184082031\tTop5 test acc: 96.72563934326172\n",
+ "Epoch 17\tTop1 Train accuracy 65.05500793457031\tTop1 Test accuracy: 63.95392990112305\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 18\tTop1 Train accuracy 65.11439514160156\tTop1 Test accuracy: 64.01252746582031\tTop5 test acc: 96.72563934326172\n",
+ "Epoch 19\tTop1 Train accuracy 65.21205139160156\tTop1 Test accuracy: 64.07112121582031\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 20\tTop1 Train accuracy 65.31169891357422\tTop1 Test accuracy: 64.06135559082031\tTop5 test acc: 96.73540496826172\n",
+ "Epoch 21\tTop1 Train accuracy 65.40338134765625\tTop1 Test accuracy: 64.18830871582031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 22\tTop1 Train accuracy 65.45320129394531\tTop1 Test accuracy: 64.1969223022461\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 23\tTop1 Train accuracy 65.53292083740234\tTop1 Test accuracy: 64.23828125\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 24\tTop1 Train accuracy 65.61064910888672\tTop1 Test accuracy: 64.30549621582031\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 25\tTop1 Train accuracy 65.68638610839844\tTop1 Test accuracy: 64.31526184082031\tTop5 test acc: 96.69634246826172\n",
+ "Epoch 26\tTop1 Train accuracy 65.75055694580078\tTop1 Test accuracy: 64.39338684082031\tTop5 test acc: 96.66704559326172\n",
+ "Epoch 27\tTop1 Train accuracy 65.80635833740234\tTop1 Test accuracy: 64.40315246582031\tTop5 test acc: 96.67681121826172\n",
+ "Epoch 28\tTop1 Train accuracy 65.8581771850586\tTop1 Test accuracy: 64.39338684082031\tTop5 test acc: 96.67681121826172\n",
+ "Epoch 29\tTop1 Train accuracy 65.91397857666016\tTop1 Test accuracy: 64.42268371582031\tTop5 test acc: 96.65727996826172\n",
+ "Epoch 30\tTop1 Train accuracy 65.96340942382812\tTop1 Test accuracy: 64.42268371582031\tTop5 test acc: 96.63774871826172\n",
+ "Epoch 31\tTop1 Train accuracy 66.00127410888672\tTop1 Test accuracy: 64.39338684082031\tTop5 test acc: 96.62798309326172\n",
+ "Epoch 32\tTop1 Train accuracy 66.05707550048828\tTop1 Test accuracy: 64.39338684082031\tTop5 test acc: 96.65727996826172\n",
+ "Epoch 33\tTop1 Train accuracy 66.10092163085938\tTop1 Test accuracy: 64.43244934082031\tTop5 test acc: 96.66704559326172\n",
+ "Epoch 34\tTop1 Train accuracy 66.13480377197266\tTop1 Test accuracy: 64.44221496582031\tTop5 test acc: 96.64751434326172\n",
+ "Epoch 35\tTop1 Train accuracy 66.16669464111328\tTop1 Test accuracy: 64.4801254272461\tTop5 test acc: 96.63774871826172\n",
+ "Epoch 36\tTop1 Train accuracy 66.21452331542969\tTop1 Test accuracy: 64.4801254272461\tTop5 test acc: 96.63774871826172\n",
+ "Epoch 37\tTop1 Train accuracy 66.2547836303711\tTop1 Test accuracy: 64.5191879272461\tTop5 test acc: 96.61821746826172\n",
+ "Epoch 38\tTop1 Train accuracy 66.28069305419922\tTop1 Test accuracy: 64.5582504272461\tTop5 test acc: 96.62798309326172\n",
+ "Epoch 39\tTop1 Train accuracy 66.32653045654297\tTop1 Test accuracy: 64.57662963867188\tTop5 test acc: 96.63774871826172\n",
+ "Epoch 40\tTop1 Train accuracy 66.35881805419922\tTop1 Test accuracy: 64.62431335449219\tTop5 test acc: 96.61821746826172\n",
+ "Epoch 41\tTop1 Train accuracy 66.37077331542969\tTop1 Test accuracy: 64.68290710449219\tTop5 test acc: 96.61821746826172\n",
+ "Epoch 42\tTop1 Train accuracy 66.39269256591797\tTop1 Test accuracy: 64.66337585449219\tTop5 test acc: 96.61821746826172\n",
+ "Epoch 43\tTop1 Train accuracy 66.41262817382812\tTop1 Test accuracy: 64.66337585449219\tTop5 test acc: 96.63774871826172\n",
+ "Epoch 44\tTop1 Train accuracy 66.45248413085938\tTop1 Test accuracy: 64.62431335449219\tTop5 test acc: 96.65727996826172\n",
+ "Epoch 45\tTop1 Train accuracy 66.48238372802734\tTop1 Test accuracy: 64.65361022949219\tTop5 test acc: 96.66704559326172\n",
+ "Epoch 46\tTop1 Train accuracy 66.51825714111328\tTop1 Test accuracy: 64.65361022949219\tTop5 test acc: 96.67681121826172\n",
+ "Epoch 47\tTop1 Train accuracy 66.56608581542969\tTop1 Test accuracy: 64.64384460449219\tTop5 test acc: 96.65727996826172\n",
+ "Epoch 48\tTop1 Train accuracy 66.59996795654297\tTop1 Test accuracy: 64.61454772949219\tTop5 test acc: 96.67681121826172\n",
+ "Epoch 49\tTop1 Train accuracy 66.64381408691406\tTop1 Test accuracy: 64.67314147949219\tTop5 test acc: 96.67681121826172\n",
+ "Epoch 50\tTop1 Train accuracy 66.65178680419922\tTop1 Test accuracy: 64.70243835449219\tTop5 test acc: 96.69519805908203\n",
+ "Epoch 51\tTop1 Train accuracy 66.65178680419922\tTop1 Test accuracy: 64.72196960449219\tTop5 test acc: 96.69519805908203\n",
+ "Epoch 52\tTop1 Train accuracy 66.69363403320312\tTop1 Test accuracy: 64.70358276367188\tTop5 test acc: 96.72449493408203\n",
+ "Epoch 53\tTop1 Train accuracy 66.70957946777344\tTop1 Test accuracy: 64.75241088867188\tTop5 test acc: 96.71472930908203\n",
+ "Epoch 54\tTop1 Train accuracy 66.72552490234375\tTop1 Test accuracy: 64.81100463867188\tTop5 test acc: 96.71472930908203\n",
+ "Epoch 55\tTop1 Train accuracy 66.73548889160156\tTop1 Test accuracy: 64.84892272949219\tTop5 test acc: 96.69519805908203\n",
+ "Epoch 56\tTop1 Train accuracy 66.77734375\tTop1 Test accuracy: 64.82077026367188\tTop5 test acc: 96.71472930908203\n",
+ "Epoch 57\tTop1 Train accuracy 66.78730773925781\tTop1 Test accuracy: 64.81100463867188\tTop5 test acc: 96.73426055908203\n",
+ "Epoch 58\tTop1 Train accuracy 66.8092269897461\tTop1 Test accuracy: 64.82077026367188\tTop5 test acc: 96.73426055908203\n",
+ "Epoch 59\tTop1 Train accuracy 66.82716369628906\tTop1 Test accuracy: 64.81962585449219\tTop5 test acc: 96.74402618408203\n",
+ "Epoch 60\tTop1 Train accuracy 66.84510040283203\tTop1 Test accuracy: 64.83800506591797\tTop5 test acc: 96.74402618408203\n",
+ "Epoch 61\tTop1 Train accuracy 66.875\tTop1 Test accuracy: 64.80009460449219\tTop5 test acc: 96.75379180908203\n",
+ "Epoch 62\tTop1 Train accuracy 66.88894653320312\tTop1 Test accuracy: 64.79032897949219\tTop5 test acc: 96.76355743408203\n",
+ "Epoch 63\tTop1 Train accuracy 66.91127014160156\tTop1 Test accuracy: 64.78056335449219\tTop5 test acc: 96.76355743408203\n",
+ "Epoch 64\tTop1 Train accuracy 66.93319702148438\tTop1 Test accuracy: 64.76103210449219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 65\tTop1 Train accuracy 66.96907043457031\tTop1 Test accuracy: 64.78056335449219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 66\tTop1 Train accuracy 66.97704315185547\tTop1 Test accuracy: 64.79032897949219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 67\tTop1 Train accuracy 67.00494384765625\tTop1 Test accuracy: 64.76103210449219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 68\tTop1 Train accuracy 67.02487182617188\tTop1 Test accuracy: 64.74150085449219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 69\tTop1 Train accuracy 67.04280853271484\tTop1 Test accuracy: 64.73173522949219\tTop5 test acc: 96.78308868408203\n",
+ "Epoch 70\tTop1 Train accuracy 67.04280853271484\tTop1 Test accuracy: 64.77079772949219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 71\tTop1 Train accuracy 67.0447998046875\tTop1 Test accuracy: 64.79032897949219\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 72\tTop1 Train accuracy 67.05078125\tTop1 Test accuracy: 64.75241088867188\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 73\tTop1 Train accuracy 67.06074523925781\tTop1 Test accuracy: 64.76217651367188\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 74\tTop1 Train accuracy 67.07270050048828\tTop1 Test accuracy: 64.74264526367188\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 75\tTop1 Train accuracy 67.0826644897461\tTop1 Test accuracy: 64.7340316772461\tTop5 test acc: 96.77332305908203\n",
+ "Epoch 76\tTop1 Train accuracy 67.09263610839844\tTop1 Test accuracy: 64.7242660522461\tTop5 test acc: 96.78308868408203\n",
+ "Epoch 77\tTop1 Train accuracy 67.1045913696289\tTop1 Test accuracy: 64.6949691772461\tTop5 test acc: 96.76470184326172\n",
+ "Epoch 78\tTop1 Train accuracy 67.1105728149414\tTop1 Test accuracy: 64.6949691772461\tTop5 test acc: 96.75493621826172\n",
+ "Epoch 79\tTop1 Train accuracy 67.13288879394531\tTop1 Test accuracy: 64.6949691772461\tTop5 test acc: 96.75493621826172\n",
+ "Epoch 80\tTop1 Train accuracy 67.13887023925781\tTop1 Test accuracy: 64.7242660522461\tTop5 test acc: 96.76470184326172\n",
+ "Epoch 81\tTop1 Train accuracy 67.14684295654297\tTop1 Test accuracy: 64.7145004272461\tTop5 test acc: 96.76470184326172\n",
+ "Epoch 82\tTop1 Train accuracy 67.17076110839844\tTop1 Test accuracy: 64.7242660522461\tTop5 test acc: 96.75493621826172\n",
+ "Epoch 83\tTop1 Train accuracy 67.20065307617188\tTop1 Test accuracy: 64.71565246582031\tTop5 test acc: 96.75493621826172\n",
+ "Epoch 84\tTop1 Train accuracy 67.21659851074219\tTop1 Test accuracy: 64.72541809082031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 85\tTop1 Train accuracy 67.21061706542969\tTop1 Test accuracy: 64.7437973022461\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 86\tTop1 Train accuracy 67.23851776123047\tTop1 Test accuracy: 64.7535629272461\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 87\tTop1 Train accuracy 67.25247192382812\tTop1 Test accuracy: 64.72541809082031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 88\tTop1 Train accuracy 67.2584457397461\tTop1 Test accuracy: 64.71565246582031\tTop5 test acc: 96.73540496826172\n",
+ "Epoch 89\tTop1 Train accuracy 67.26641845703125\tTop1 Test accuracy: 64.79377746582031\tTop5 test acc: 96.73540496826172\n",
+ "Epoch 90\tTop1 Train accuracy 67.2704086303711\tTop1 Test accuracy: 64.79377746582031\tTop5 test acc: 96.72563934326172\n",
+ "Epoch 91\tTop1 Train accuracy 67.2803726196289\tTop1 Test accuracy: 64.77424621582031\tTop5 test acc: 96.73540496826172\n",
+ "Epoch 92\tTop1 Train accuracy 67.29033660888672\tTop1 Test accuracy: 64.78401184082031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 93\tTop1 Train accuracy 67.29830932617188\tTop1 Test accuracy: 64.78401184082031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 94\tTop1 Train accuracy 67.30429077148438\tTop1 Test accuracy: 64.78401184082031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 95\tTop1 Train accuracy 67.30030059814453\tTop1 Test accuracy: 64.79377746582031\tTop5 test acc: 96.74517059326172\n",
+ "Epoch 96\tTop1 Train accuracy 67.30827331542969\tTop1 Test accuracy: 64.77424621582031\tTop5 test acc: 96.72563934326172\n",
+ "Epoch 97\tTop1 Train accuracy 67.31624603271484\tTop1 Test accuracy: 64.7926254272461\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 98\tTop1 Train accuracy 67.32222747802734\tTop1 Test accuracy: 64.8219223022461\tTop5 test acc: 96.71587371826172\n",
+ "Epoch 99\tTop1 Train accuracy 67.32820129394531\tTop1 Test accuracy: 64.8121566772461\tTop5 test acc: 96.71587371826172\n"
 ],
 "name": "stdout"
 }
 ]
@@ -585,7 +814,7 @@
 "source": [
 ""
 ],
- "execution_count": null,
+ "execution_count": 26,
 "outputs": []
 }
 ]