diff --git a/Eval_Graphene_model.ipynb b/Eval_Graphene_model.ipynb
index 33f6430..20588ac 100644
--- a/Eval_Graphene_model.ipynb
+++ b/Eval_Graphene_model.ipynb
@@ -56,6 +56,8 @@
     "from skimage.filters import threshold_otsu\n",
     "from sklearn.mixture import GaussianMixture\n",
     "from sklearn.cluster import KMeans\n",
+    "from skimage.feature import blob_log\n",
+    "from scipy.ndimage import center_of_mass\n",
     "\n",
     "# for cropping function\n",
     "if drive:\n",
@@ -92,7 +94,7 @@
    "outputs": [],
    "source": [
     "# set mps, just for my computer\n",
-    "device = torch.device('mps')"
+    "# device = torch.device('mps')"
    ]
   },
   {
@@ -119,6 +121,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# should be something like 'content/drive/My Drive/Gomb-Net files'\n",
     "if drive:\n",
     "    shared_folder = 'drive/My Drive/Gomb-Net files'\n",
     "else:\n",
@@ -226,7 +229,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "loss_history = np.load(str(shared_folder + '/Pretrained_models/TwoLeggedGraphene256.pthloss_history.npz'))\n",
+    "loss_history = np.load(str(shared_folder + '/Pretrained_models/Graphene_model_loss_history.npz'))\n",
     "train_loss = loss_history['train_loss_history']\n",
     "val_loss = loss_history['val_loss_history']\n",
     "\n",
@@ -252,9 +255,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "model_path = str(shared_folder + '/Pretrained_models/TwoLeggedGraphene256_b.pth')\n",
+    "model_path = str(shared_folder + '/Pretrained_models/Graphene_model.pth')\n",
     "\n",
-    "checkpoint = torch.load(model_path, map_location=torch.device('mps'))\n",
+    "checkpoint = torch.load(model_path, map_location=device)\n",
     "model.load_state_dict(checkpoint['model_state_dict'])\n",
     "optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n",
     "model.eval()"
@@ -335,14 +338,10 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
-    "#import blobdog\n",
-    "from skimage.feature import blob_log\n",
-    "from scipy.ndimage import center_of_mass"
+    "### Comparison to blob-finder"
    ]
   },
   {
@@ -364,6 +363,15 @@
     "plt.axis('off')"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# The following code is commented out because it takes a few minutes to run; it just measures network performance metrics across the test dataset."
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -439,7 +447,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "exp_data = np.load('./moire.npz')\n",
+    "exp_data = np.load(str(shared_folder + '/Experimental_datasets/moire.npz'))\n",
     "im_array = exp_data['im_array']\n",
     "pixel_size = exp_data['pixel_size']\n",
     "\n",
@@ -456,16 +464,6 @@
     "plt.imshow(im_array, cmap='gray')"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "sys.path.insert(0, '/Users/austin/Documents/GitHub/DataGenSTEM/DataGenSTEM')\n",
-    "import data_generator as dg"
-   ]
-  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -565,26 +563,9 @@
    "outputs": [],
    "source": [
     "# make dist_hist a 1D array\n",
-    "dist_hist = np.concatenate(dist_hist) * 1e10 / resize_factor"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "median_dist = np.median(dist_hist)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "avg_dist = np.mean(dist_hist)\n",
+    "dist_hist = np.concatenate(dist_hist) * 1e10 / resize_factor\n",
     "\n",
+    "avg_dist = np.mean(dist_hist)\n",
     "\n",
     "plt.figure(figsize = (8,8), dpi=300)\n",
     "plt.hist(dist_hist, bins=100, color='gray')\n",
@@ -631,7 +612,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "### Now, let's try to get graphene order parameter vector\n"
+    "### Below is some old code not used in the paper\n"
    ]
   },
   {
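
Note on the device change: the hard-coded device = torch.device('mps') is now commented out, but torch.load(model_path, map_location=device) still expects a name called device to be defined somewhere earlier in the notebook. A minimal, portable sketch of how that cell could be written instead (an illustration, not part of the diff, assuming only that torch is already imported):

    # Pick whichever backend is available: CUDA (e.g. on Colab GPU runtimes),
    # MPS on Apple silicon, or plain CPU as the fallback.
    if torch.cuda.is_available():
        device = torch.device('cuda')
    elif torch.backends.mps.is_available():
        device = torch.device('mps')
    else:
        device = torch.device('cpu')

With device defined this way, the checkpoint-loading cell runs unchanged on any of the three backends.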