diff --git a/examples/TF_Characterization_and_Control.ipynb b/examples/TF_Characterization_and_Control.ipynb new file mode 100644 index 0000000..13f6f9b --- /dev/null +++ b/examples/TF_Characterization_and_Control.ipynb @@ -0,0 +1,916 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + }, + "colab": { + "name": "TF Characterization_and_Control.ipynb", + "provenance": [], + "collapsed_sections": [] + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "g30eXP36qXjA" + }, + "source": [ + "

Example of using the datasets for quantum characterization and control

" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2C0aqWLyqXjD" + }, + "source": [ + "# preample\n", + "import zipfile\n", + "import pickle\n", + "import os\n", + "import numpy as np" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Y_KVq8FtqY7m" + }, + "source": [ + "'''The datalist below lists each different dataset category.\n", + "Each category comprises two compressed files:\n", + "(a) one for the non-distorted examples; and\n", + "(b) another for the distorted examples.\n", + "\n", + "Each dataset is stored in a folder corresponding to its category on Cloudstor.\n", + "There are 52 datasets in the QDataSet and 26 categories.\n", + "\n", + "\n", + "To run the code:\n", + "1. Download both the distorted and non-distorted datasets from Cloudstor\n", + "(assuming both are to be used);\n", + "2. To select a dataset for the model (e.g. G_1q_X), uncomment the dataset\n", + "in the dataset list below.'''" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "KKVDS6pMqXjE" + }, + "source": [ + "datalist = [\n", + "'G_1q_X',\n", + "# 'G_1q_XY',\n", + "# 'G_1q_XY_XZ_N1N5',\n", + "# 'G_1q_XY_XZ_N1N6',\n", + "# 'G_1q_XY_XZ_N3N6',\n", + "# 'G_1q_X_Z_N1',\n", + "# 'G_1q_X_Z_N2',\n", + "# 'G_1q_X_Z_N3',\n", + "# 'G_1q_X_Z_N4',\n", + "# 'G_2q_IX-XI_IZ-ZI_N1-N6',\n", + "# 'G_2q_IX-XI-XX',\n", + "# 'G_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'G_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'S_1q_X',\n", + "# 'S_1q_XY',\n", + "# 'S_1q_XY_XZ_N1N5',\n", + "# 'S_1q_XY_XZ_N1N6',\n", + "# 'S_1q_XY_XZ_N3N6',\n", + "# 'S_1q_X_Z_N1',\n", + "# 'S_1q_X_Z_N2',\n", + "# 'S_1q_X_Z_N3',\n", + "# 'S_1q_X_Z_N4',\n", + "# 'S_2q_IX-XI_IZ-ZI_N1-N6',\n", + "# 'S_2q_IX-XI-XX',\n", + "# 'S_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'S_2q_IX-XI-XX_IZ-ZI_N1-N6',\n", + "]" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "QR5iBorFqXjF" + }, + "source": [ + 
"'''Create two strings, one for each of the undistorted and distorted datasets.'''\n", + "data1 = datalist[0]\n", + "data2 = data1 + '_D'" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "aF0gCIRVqXjF", + "outputId": "bd3f77f1-3cf4-491a-80a1-6c7a99071919" + }, + "source": [ + "'''Check strings created correction'''\n", + "print(data1)\n", + "print(data2)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "G_1q_X\n", + "G_1q_X_D\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "_2ii5CF1qXjG" + }, + "source": [ + "'''Set the working directory to the location of the datasets, example below.'''\n", + "os.chdir('/projects/QuantumDS/QDataSet/' + data1)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Uj4uDe_LqXjI" + }, + "source": [ + "## Characterization of an open quantum system" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Fw4HbDprqXjJ" + }, + "source": [ + "'''Define a function to extract the data from the dataset.'''\n", + "\n", + "def load_dataset(dataset_name, num_training_ex, num_testing_ex):\n", + " \n", + " # initialize empt lists for storing the data\n", + " data_input = []\n", + " data_target = []\n", + " \n", + " # 1) unzip the dataset zipfile\n", + " fzip = zipfile.ZipFile(\"%s.zip\"%dataset_name, mode='r')\n", + " \n", + " # loop over example files\n", + " #########################################################\n", + " for idx_ex in range(num_training_ex + num_testing_ex):\n", + " \n", + " # 2) extract the example file from the dataset \n", + " fname = \"%s_ex_%d\"%(dataset_name, idx_ex)\n", + " fzip.extract( fname )\n", + " \n", + " # 3) load the example file\n", + " f = open(fname, \"rb\")\n", + " data = pickle.load(f)\n", + " \n", + " # 4) extract the useful information from the example file:\n", + " \n", + " # For noise spectroscopy, we 
need to extract a set of control pulse and the \n", + " # correpsonding informationally-complete observables\n", + " \n", + " # add the pair of input and target to the corresponding lists\n", + " data_input.append( data[\"pulses\"][0:1, :, 0, :])\n", + " data_target.append( data[\"expectations\"] )\n", + " \n", + " # 5) close and delete the example file\n", + " f.close()\n", + " os.remove(fname)\n", + " #########################################################\n", + " # 5) close the dataset zipfile\n", + " fzip.close()\n", + " \n", + " # 6) split the data into training and testing\n", + " data_input = np.concatenate(data_input, axis=0)\n", + " data_target = np.concatenate(data_target, axis=0)\n", + " \n", + " training_input = data_input[0:num_training_ex, :]\n", + " testing_input = data_input[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " training_target = data_target[0:num_training_ex, :]\n", + " testing_target = data_target[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " return training_input, training_target, testing_input, testing_target" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "3J58CjoUqXjK" + }, + "source": [ + "'''Define the dataset parameters'''\n", + "dataset_name = data1#\"G_1q_XY_XZ_N1N5\" # dataset name\n", + "num_training_ex = 7 # number of training examples\n", + "num_testing_ex = 3 # number of testing examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "z0flCyx6qXjK" + }, + "source": [ + "# load the dataset\n", + "training_input, training_target, testing_input, testing_target = load_dataset(dataset_name, num_training_ex, num_testing_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "EMBBIsCeqXjL", + "outputId": "02f42ea5-cdc0-4c60-b302-35ce8d30e913" + }, + "source": [ + "'''Inspect the shape of the training and test dataframes 
(if need be).'''\n", + "print(training_input.shape, training_target.shape, testing_input.shape, testing_target.shape)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 1024, 1) (7, 18) (3, 1024, 1) (3, 18)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oV629oSWqXjL", + "outputId": "66b0dc25-fc41-434b-80eb-ef36bf53e685" + }, + "source": [ + "'''Set input parameter shape based on number of axes along which controls / noise is applied.'''\n", + "axnum = training_input.shape[2]\n", + "axnum2 = training_target.shape[1]\n", + "print(axnum, axnum2)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "1 18\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yZC1BYFjqXjM", + "outputId": "e6e3b156-7565-4977-d48e-1720a19dfcc2" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input, testing_target)\n", + "\n", + "# noise_spectrum = estimate_noise(trained_model)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(None, axnum))\n", + "\n", + "output_layer = K.layers.LSTM(axnum2, return_sequences=False)(input_layer)\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(training_input, training_target, epochs=10, validation_data = (testing_input, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 2s 2s/step - 
loss: 0.3332 - val_loss: 0.3331\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 237ms/step - loss: 0.3330 - val_loss: 0.3330\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 256ms/step - loss: 0.3328 - val_loss: 0.3330\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 254ms/step - loss: 0.3326 - val_loss: 0.3329\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 247ms/step - loss: 0.3323 - val_loss: 0.3328\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 271ms/step - loss: 0.3321 - val_loss: 0.3327\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 259ms/step - loss: 0.3319 - val_loss: 0.3326\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 258ms/step - loss: 0.3316 - val_loss: 0.3325\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 230ms/step - loss: 0.3314 - val_loss: 0.3325\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 279ms/step - loss: 0.3311 - val_loss: 0.3324\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 11 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pk47O1tEqXjM" + }, + "source": [ + "## Using the $V_O$ operators in a calculation" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "n0XfHjkbqXjM" + }, + "source": [ + "'''Define a function to extract the data from the dataset.'''\n", + "def load_Vo_dataset(dataset_name, num_ex):\n", + " \n", + " # initialize empt lists for storing the data\n", + " pulses = []\n", + " VX = []\n", + " VY = []\n", + " VZ = []\n", + " # 1) unzip the dataset zipfile\n", + " fzip = zipfile.ZipFile(\"%s.zip\"%dataset_name, mode='r')\n", + " \n", + " # loop over example files\n", + " #########################################################\n", + " for idx_ex in range(num_ex):\n", + " \n", + " # 2) extract the example 
file from the dataset \n", + " fname = \"%s_ex_%d\"%(dataset_name, idx_ex)\n", + " fzip.extract( fname )\n", + " \n", + " # 3) load the example file\n", + " f = open(fname, \"rb\")\n", + " data = pickle.load(f)\n", + " \n", + " # 4) extract the useful information from the example file:\n", + " \n", + " # For noise spectroscopy, we need to extract a set of control pulse and the \n", + " # correpsonding informationally-complete observables\n", + " \n", + " # add the pair of input and target to the corresponding lists\n", + " pulses.append( data[\"pulses\"][0:1, :, 0, :])\n", + " VX.append( data[\"Vo_operator\"][0] )\n", + " VY.append( data[\"Vo_operator\"][1] )\n", + " VZ.append( data[\"Vo_operator\"][2] )\n", + " \n", + " # 5) close and delete the example file\n", + " f.close()\n", + " os.remove(fname)\n", + " #########################################################\n", + " # 5) close the dataset zipfile\n", + " fzip.close()\n", + " \n", + " # 6) split the data into training and testing\n", + " pulses = np.concatenate(pulses, axis=0)\n", + " VX = np.concatenate(VX, axis=0)\n", + " VY = np.concatenate(VY, axis=0)\n", + " VZ = np.concatenate(VZ, axis=0) \n", + " \n", + " return pulses, VX, VY, VZ" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "VRqaamMBqXjN" + }, + "source": [ + "# define the dataset parameters\n", + "dataset_name = data2 #\"G_1q_XY_XZ_N1N5_D\" # dataset name\n", + "num_ex = 7 # number of examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "2_Bv3VDcqXjN" + }, + "source": [ + "pulses, VX, VY, VZ = load_Vo_dataset(dataset_name, num_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "t1qOvOzjqXjN", + "outputId": "a259ddb5-db82-4743-f7c7-34552f0091b8" + }, + "source": [ + "print(pulses.shape, VX.shape, VY.shape, VZ.shape)" + ], + "execution_count": null, + "outputs": [ + { + 
"output_type": "stream", + "text": [ + "(7, 1024, 1) (7, 2, 2) (7, 2, 2) (7, 2, 2)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MhRDQANNqXjO", + "outputId": "ff9a6bcf-3a2c-4bba-b95a-143320046fd8" + }, + "source": [ + "'''Set input parameter shape based on number of qubits and set corresponding shape of flattened \n", + "tensor for input into LSTM parameter.'''\n", + "# axnum = training_input.shape[2]\n", + "axnum3 = VX.shape[2]\n", + "lstmflat = axnum3 **2\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "2 4\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "I9TV7ta6qXjO" + }, + "source": [ + "# use the Vo in a calculation\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# result = my_calculation(pulses, VX)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "W-7DdpdsqXjO", + "outputId": "11aa19aa-021f-4ba7-f79c-d7eedbc2b2c8" + }, + "source": [ + "# train an ML model to learn Vo\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input=pulses[0:100,;], training_target=VX[0:100, :])\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input=pulses[100:,:], testing_target=Vx[100:,:])\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(None, axnum))\n", + "\n", + "output_layer = K.layers.Reshape((axnum3,axnum3)) ( K.layers.LSTM(lstmflat, return_sequences=False)(input_layer) )\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(pulses, VX, epochs=10, validation_split=0.1)" + ], + "execution_count": 
null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 1s 1s/step - loss: 0.4999 - val_loss: 0.4987\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 221ms/step - loss: 0.4987 - val_loss: 0.4976\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 213ms/step - loss: 0.4976 - val_loss: 0.4964\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 207ms/step - loss: 0.4964 - val_loss: 0.4952\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 241ms/step - loss: 0.4952 - val_loss: 0.4940\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 229ms/step - loss: 0.4940 - val_loss: 0.4928\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 178ms/step - loss: 0.4928 - val_loss: 0.4916\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 229ms/step - loss: 0.4916 - val_loss: 0.4904\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 197ms/step - loss: 0.4904 - val_loss: 0.4891\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 240ms/step - loss: 0.4891 - val_loss: 0.4879\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 18 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zKOrO90VqXjO" + }, + "source": [ + "## Model the effect of control distortions" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BW4HxJPPqXjP" + }, + "source": [ + "# define the dataset parameters\n", + "dataset_name = data2 #\"G_1q_XY_XZ_N1N5_D\" # dataset name\n", + "num_training_ex = 7 # number of training examples\n", + "num_testing_ex = 3 # number of testing examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "crAqOEoEqXjP" + }, + "source": [ + "# load the dataset\n", + "training_input, 
training_target, testing_input, testing_target = load_dataset(dataset_name, num_training_ex, num_testing_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "qCDjc1_SqXjP", + "outputId": "0b15ee62-b535-4eeb-a00f-70a579e062df" + }, + "source": [ + "print(training_input.shape, training_target.shape, testing_input.shape, testing_target.shape)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 1024, 1) (7, 18) (3, 1024, 1) (3, 18)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "h1fPLTukqXjP", + "outputId": "49d04e02-b196-43d5-cfdd-3b46d5cf525b" + }, + "source": [ + "'''Set shape parameters based on shape of inputs and outputs.'''\n", + "axnum = training_input.shape[2]\n", + "lstmout = training_target.shape[1]\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 1024, 1) (7, 18) (3, 1024, 1) (3, 18)\n", + "1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Eid1-VYwqXjQ", + "outputId": "0f8a3ec0-b685-4aff-8de1-8e3f7c3c5436" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input, testing_target)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(None, axnum))\n", + "\n", + "output_layer = K.layers.LSTM(lstmout, return_sequences=False)(input_layer)\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(training_input, training_target, epochs=10, 
validation_data = (testing_input, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 2s 2s/step - loss: 0.3332 - val_loss: 0.3332\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 223ms/step - loss: 0.3328 - val_loss: 0.3332\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 223ms/step - loss: 0.3324 - val_loss: 0.3332\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 260ms/step - loss: 0.3319 - val_loss: 0.3332\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 298ms/step - loss: 0.3315 - val_loss: 0.3332\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 293ms/step - loss: 0.3310 - val_loss: 0.3333\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 244ms/step - loss: 0.3305 - val_loss: 0.3333\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 206ms/step - loss: 0.3300 - val_loss: 0.3333\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 209ms/step - loss: 0.3294 - val_loss: 0.3334\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 194ms/step - loss: 0.3288 - val_loss: 0.3335\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 23 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6pIscMDvqXjQ" + }, + "source": [ + "## Learning a controller for a quantum system" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "bya6ZuknqXjQ" + }, + "source": [ + "# define the dataset parameters\n", + "dataset_name = data1 #\"G_1q_XY_XZ_N1N5\" # dataset name\n", + "num_training_ex = 7 # number of training examples\n", + "num_testing_ex = 3 # number of testing examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": 
"J-T5MWwGqXjQ" + }, + "source": [ + "# load the dataset [inputs and targets are echanged for quantum control problem, this is the inverse of the \n", + "# characterization problem]\n", + "training_target, training_input, testing_target, testing_input = load_dataset(dataset_name, num_training_ex, num_testing_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "sa9vC8OOqXjR", + "outputId": "1b4f6837-8ca4-492f-a649-32a468fe98d9" + }, + "source": [ + "print(training_input.shape, training_target.shape, testing_input.shape, testing_target.shape)\n", + "\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 18) (7, 1024, 1) (3, 18) (3, 1024, 1)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZOpFUhzcqXjR" + }, + "source": [ + "'''Set shape parameters based on shape of inputs and outputs.'''\n", + "axnum4 = training_input.shape[1]\n", + "axnum5 = training_target.shape[1]\n", + "axnum6 = training_target.shape[2]\n", + "axnum7 = axnum5 * axnum6" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "7AYkya-cqXjR", + "outputId": "e7549d7d-a130-4d2c-a7f8-6bdf8ab7797e" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input, testing_target)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(axnum4))\n", + "\n", + "output_layer = K.layers.Reshape((axnum5,axnum6))( K.layers.Dense(axnum7)(input_layer) )\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + 
"ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(training_input, training_target, epochs=10, validation_data = (testing_input, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 0s 221ms/step - loss: 439.2711 - val_loss: 523.0981\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 439.1790 - val_loss: 523.1002\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 439.0872 - val_loss: 523.1022\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.9952 - val_loss: 523.1042\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.9032 - val_loss: 523.1063\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.8113 - val_loss: 523.1083\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.7196 - val_loss: 523.1104\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.6277 - val_loss: 523.1125\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.5360 - val_loss: 523.1146\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 438.4443 - val_loss: 523.1167\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 28 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "rZ8yWC2jqXjR" + }, + "source": [ + "" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "X59zpnpNqXjR" + }, + "source": [ + "" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/examples/TF_QST.ipynb b/examples/TF_QST.ipynb new file 
mode 100644 index 0000000..bd6de57 --- /dev/null +++ b/examples/TF_QST.ipynb @@ -0,0 +1,731 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + }, + "colab": { + "name": "TF QST.ipynb", + "provenance": [], + "collapsed_sections": [] + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "cLSXsBlT352N" + }, + "source": [ + "

Example of using the datasets for quantum state tomography

" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "f1HYi6sy352Q" + }, + "source": [ + "# preample\n", + "import zipfile\n", + "import pickle\n", + "import os\n", + "import numpy as np" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "aF38mYPB4Ff3" + }, + "source": [ + "'''The datalist below lists each different dataset category.\n", + "Each category comprises two compressed files:\n", + "(a) one for the non-distorted examples; and\n", + "(b) another for the distorted examples.\n", + "\n", + "Each dataset is stored in a folder corresponding to its category on Cloudstor.\n", + "There are 52 datasets in the QDataSet and 26 categories.\n", + "\n", + "\n", + "To run the code:\n", + "1. Download both the distorted and non-distorted datasets from Cloudstor\n", + "(assuming both are to be used);\n", + "2. To select a dataset for the model (e.g. G_1q_X), uncomment the dataset\n", + "in the dataset list below.'''" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "sHyJoxQD352S" + }, + "source": [ + "datalist = [\n", + "'G_1q_X',\n", + "# 'G_1q_XY',\n", + "# 'G_1q_XY_XZ_N1N5',\n", + "# 'G_1q_XY_XZ_N1N6',\n", + "# 'G_1q_XY_XZ_N3N6',\n", + "# 'G_1q_X_Z_N1',\n", + "# 'G_1q_X_Z_N2',\n", + "# 'G_1q_X_Z_N3',\n", + "# 'G_1q_X_Z_N4',\n", + "# 'G_2q_IX-XI_IZ-ZI_N1-N6',\n", + "# 'G_2q_IX-XI-XX',\n", + "# 'G_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'G_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'S_1q_X',\n", + "# 'S_1q_XY',\n", + "# 'S_1q_XY_XZ_N1N5',\n", + "# 'S_1q_XY_XZ_N1N6',\n", + "# 'S_1q_XY_XZ_N3N6',\n", + "# 'S_1q_X_Z_N1',\n", + "# 'S_1q_X_Z_N2',\n", + "# 'S_1q_X_Z_N3',\n", + "# 'S_1q_X_Z_N4',\n", + "# 'S_2q_IX-XI_IZ-ZI_N1-N6',\n", + "# 'S_2q_IX-XI-XX',\n", + "# 'S_2q_IX-XI-XX_IZ-ZI_N1-N5',\n", + "# 'S_2q_IX-XI-XX_IZ-ZI_N1-N6',\n", + "]" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xUkE84rp352T", + "outputId": 
"730d34b4-d5f8-421c-e442-da260b11961c" + }, + "source": [ + "'''Create two strings, one for each of the undistorted and distorted datasets.'''\n", + "data1 = datalist[0]\n", + "data2 = data1 + '_D'\n", + "print(data1)\n", + "print(data2)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "S_2q_IX-XI-XX_IZ-ZI_N1-N5\n", + "S_2q_IX-XI-XX_IZ-ZI_N1-N5_D\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BqorGjX6352U" + }, + "source": [ + "'''Set the working directory to the location of the datasets, example below.'''\n", + "os.chdir('/projects/QuantumDS/QDataSet/' + data1)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZPl94x0m352U" + }, + "source": [ + "## Pure state tomography" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "D6VqcE4f352V" + }, + "source": [ + "'''Define a function to extract the data from the dataset.'''\n", + "\n", + "def load_pure_dataset(dataset_name, num_training_ex, num_testing_ex):\n", + " \n", + " # initialize empt lists for storing the data\n", + " data_input = []\n", + " data_target = []\n", + " \n", + " # 1) unzip the dataset zipfile\n", + " fzip = zipfile.ZipFile(\"%s.zip\"%dataset_name, mode='r')\n", + " \n", + " # loop over example files\n", + " #########################################################\n", + " for idx_ex in range(num_training_ex + num_testing_ex):\n", + " \n", + " # 2) extract the example file from the dataset \n", + " fname = \"%s_ex_%d\"%(dataset_name, idx_ex)\n", + " fzip.extract( fname )\n", + " \n", + " # 3) load the example file\n", + " f = open(fname, \"rb\")\n", + " data = pickle.load(f)\n", + " \n", + " # 4) extract the useful information from the example file:\n", + " \n", + " # For state tomograhpy we need to extract a set of states and the corresponding Pauli observables,\n", + " # so we pick arbitrarily the evolved state at t=T from an initial X+ state, 
and the first noise realization.\n", + " # We can choose initial state/ time / realization\n", + " \n", + " idx_time = 1023 # t=T\n", + " idx_realization = 0 # first realization\n", + " idx_initial_state = 0 # Pauli X+ initial state\n", + " \n", + " # construct the full unitary from the product of control unitary and interaction unitary\n", + " U = data[\"U0\"][0,idx_time,0,:]@data[\"UI\"][0,idx_realization,:]\n", + " \n", + " # evolve the initial state\n", + " rho_0 = data[\"sim_parameters\"][\"initial_states\"][idx_initial_state] # initial state\n", + " rho_t = U @ rho_0 @ U.conj().T # evolved state\n", + " \n", + " # extract corresponding Pauli observables\n", + " measurements = data[\"Eo\"][0:1,idx_realization,idx_initial_state:idx_initial_state+3]\n", + " \n", + " # add the pair of input and target to the corresponding lists\n", + " \n", + " data_input.append( measurements )\n", + " data_target.append( np.reshape(rho_t, (1,rho_t.shape[0],rho_t.shape[1])) )\n", + " \n", + " # 5) close and delete the example file\n", + " f.close()\n", + " os.remove(fname)\n", + " #########################################################\n", + " # 5) close the dataset zipfile\n", + " fzip.close()\n", + " \n", + " # 6) split the data into training and testing\n", + " data_input = np.concatenate(data_input, axis=0)\n", + " data_target = np.concatenate(data_target, axis=0)\n", + " \n", + " training_input = data_input[0:num_training_ex, :]\n", + " testing_input = data_input[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " training_target = data_target[0:num_training_ex, :]\n", + " testing_target = data_target[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " return training_input, training_target, testing_input, testing_target" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "g7VyDHNt352X" + }, + "source": [ + "# define the dataset parameters\n", + "dataset_name = data1 #\"G_1q_XY_XZ_N1N5\" # 
dataset name\n", + "num_training_ex = 7 # number of training examples\n", + "num_testing_ex = 3 # number of testing examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "ce9fp2aP4m_8" + }, + "source": [ + "# load the dataset\n", + "training_input, training_target, testing_input, testing_target = load_pure_dataset(dataset_name, num_training_ex, num_testing_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "DS_7jEpT352a", + "outputId": "998f5a62-fe04-4aaa-9698-0c05b58b6de5" + }, + "source": [ + "print(training_input.shape, training_target.shape, testing_input.shape, testing_target.shape)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 3) (7, 4, 4) (3, 3) (3, 4, 4)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2PqNKVr0352a", + "outputId": "9a826d42-1c35-4ac3-d77b-72ceb4bb8141" + }, + "source": [ + "'''Set input parameter shape based on number of axes along which controls / noise is applied.'''\n", + "axnum = training_input.shape[1]\n", + "axnum2 = training_target.shape[1]\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "3 4\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "An1wtZNR352b", + "outputId": "a5d6aab9-190d-4a64-af2d-3efdfc649889" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input, testing_target)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(axnum))\n", + "\n", + 
"output_layer = K.layers.Reshape((axnum2,axnum2))(K.layers.Dense(axnum2**2)(input_layer))\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(training_input, training_target, epochs=10, validation_data = (testing_input, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 0s 227ms/step - loss: 0.0971 - val_loss: 0.1189\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0965 - val_loss: 0.1182\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0959 - val_loss: 0.1174\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0954 - val_loss: 0.1167\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0948 - val_loss: 0.1160\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0943 - val_loss: 0.1153\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0938 - val_loss: 0.1146\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0932 - val_loss: 0.1139\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0927 - val_loss: 0.1132\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 13ms/step - loss: 0.0922 - val_loss: 0.1125\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 31 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YXs436vB352b" + }, + "source": [ + "## Mixed state tomography" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "X5Fjwp7Q352c" + }, + "source": [ + "def load_mixed_dataset(dataset_name, 
num_training_ex, num_testing_ex):\n", + " \n", + " # initialize empt lists for storing the data\n", + " data_input = []\n", + " data_target = []\n", + " \n", + " # 1) unzip the dataset zipfile\n", + " fzip = zipfile.ZipFile(\"%s.zip\"%dataset_name, mode='r')\n", + " \n", + " # loop over example files\n", + " #########################################################\n", + " for idx_ex in range(num_training_ex + num_testing_ex):\n", + " \n", + " # 2) extract the example file from the dataset \n", + " fname = \"%s_ex_%d\"%(dataset_name, idx_ex)\n", + " fzip.extract( fname )\n", + " \n", + " # 3) load the example file\n", + " f = open(fname, \"rb\")\n", + " data = pickle.load(f)\n", + " \n", + " # 4) extract the useful information from the example file:\n", + " \n", + " # For state tomograhpy we need to extract a set of states and the corresponding Pauli observables,\n", + " # so we pick arbitrarily the evolved state at t=T from an initial X+ state, and averaged over all noize realizations.\n", + " # We can choose initial state/ time / realization\n", + " \n", + " idx_time = 1023 # t=T\n", + " idx_initial_state = 0 # Pauli X+ initial state index\n", + " \n", + " rho_0 = data[\"sim_parameters\"][\"initial_states\"][idx_initial_state] # initial state\n", + " U0 = data[\"U0\"][0,idx_time,0,:] # control unitary\n", + " \n", + " \n", + " \n", + " rho_t = [] # list of evolved states at t=T for every noise realization \n", + " measurements = [] # list of measurements at t=T for every noise realization \n", + " \n", + " # loop over noise realizations\n", + " for idx_UI, UI in enumerate(data[\"UI\"][0,:,0:U0.shape[0], 0:U0.shape[1]]): \n", + " # evolve the initial state\n", + " rho_t.append( (U0@UI) @ rho_0 @ (U0@UI).conj().T )\n", + " \n", + " # extract corresponding Pauli observables\n", + " measurements.append( data[\"Eo\"][0:1, idx_UI, idx_initial_state:idx_initial_state+3] )\n", + " \n", + " # average over all noise realizations to get a mixed state\n", + " rho_t = 
sum(rho_t) / data[\"sim_parameters\"][\"K\"]\n", + " measurements = sum(measurements) / data[\"sim_parameters\"][\"K\"]\n", + " \n", + " # add the pair of input and target to the corresponding lists\n", + " data_input.append( measurements )\n", + " data_target.append( np.reshape(rho_t, (1,rho_t.shape[0],rho_t.shape[1])) )\n", + " \n", + " # 5) close and delete the example file\n", + " f.close()\n", + " os.remove(fname)\n", + " #########################################################\n", + " # 5) close the dataset zipfile\n", + " fzip.close()\n", + " \n", + " # 6) split the data into training and testing\n", + " data_input = np.concatenate(data_input, axis=0)\n", + " data_target = np.concatenate(data_target, axis=0)\n", + " \n", + " training_input = data_input[0:num_training_ex, :]\n", + " testing_input = data_input[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " training_target = data_target[0:num_training_ex, :]\n", + " testing_target = data_target[num_training_ex:num_training_ex+num_testing_ex, :]\n", + " \n", + " return training_input, training_target, testing_input, testing_target" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "4rhuFQAM352c" + }, + "source": [ + "# define the dataset parameters\n", + "dataset_name = data1 #\"G_1q_XY_XZ_N1N5\" # dataset name\n", + "num_training_ex = 7 # number of training examples\n", + "num_testing_ex = 3 # number of testing examples" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "kPAJ0_DM4xTp" + }, + "source": [ + "# load the dataset\n", + "training_input, training_target, testing_input, testing_target = load_mixed_dataset(dataset_name, num_training_ex, num_testing_ex)" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "I4cv3KpQ352d", + "outputId": "dfba2ac2-c200-4870-a4e0-3539ebed1610" + }, + "source": [ + "print(training_input.shape, 
training_target.shape, testing_input.shape, testing_target.shape)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "(7, 3) (7, 4, 4) (3, 3) (3, 4, 4)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nOTyPXGu352d", + "outputId": "43d1ab6b-fcd8-4fa1-9f85-bb99d19bdd0f" + }, + "source": [ + "'''Set input parameter shape based on number of axes along which controls / noise is applied.'''\n", + "axnum = training_input.shape[1]\n", + "axnum2 = training_target.shape[1]\n" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "3 4\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6mpUf3dS352e", + "outputId": "be3bf2a1-b960-4fe3-e654-08a792774c12" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input, testing_target)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(axnum))\n", + "\n", + "output_layer = K.layers.Reshape((axnum2,axnum2))(K.layers.Dense(axnum2**2)(input_layer))\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "ml_model.fit(training_input, training_target, epochs=10, validation_data = (testing_input, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 0s 241ms/step - loss: 0.0689 - val_loss: 0.0854\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0684 - val_loss: 
0.0849\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0680 - val_loss: 0.0844\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0675 - val_loss: 0.0838\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0670 - val_loss: 0.0833\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 13ms/step - loss: 0.0666 - val_loss: 0.0828\n", + "Epoch 7/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0661 - val_loss: 0.0823\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0657 - val_loss: 0.0818\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0653 - val_loss: 0.0813\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0648 - val_loss: 0.0808\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 37 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7EicWOmf352e" + }, + "source": [ + "## Model the effect of sampling noise (finite number of shots)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "RQF2nZx5352e" + }, + "source": [ + "Nshots = 1024 # Number of shots" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "chknvWp5352f" + }, + "source": [ + "training_input_noisy = np.zeros(training_input.shape)\n", + "testing_input_noisy = np.zeros(testing_input.shape)\n", + "\n", + "# generate a binomial distribution with mean centered at the ideal expectation values, the states (targets) do not change\n", + "for idx_ex in range(num_training_ex):\n", + " training_input_noisy[idx_ex,:] = [(2*np.random.binomial(Nshots, 0.5*(exp+1) ) - Nshots)/Nshots for exp in training_input[idx_ex,:]]\n", + " \n", + "for idx_ex in 
range(num_testing_ex):\n", + " testing_input_noisy[idx_ex,:] = [(2*np.random.binomial(Nshots, 0.5*(exp+1) ) - Nshots)/Nshots for exp in testing_input[idx_ex,:]]" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "n-WcRDX2352f", + "outputId": "585a8688-5533-40d9-f106-7548d03f4cf4" + }, + "source": [ + "# use the dataset for training and testing an ML algorithm\n", + "\n", + "#############################################\n", + "## PUT YOUR CODE HERE ##\n", + "\n", + "# trained_model = my_training_function(training_input_noisy, training_target)\n", + "\n", + "# performance = my_testing_function(trained_model, testing_input_noisy, testing_target)\n", + "\n", + "### Below is an example using tensorflow ###\n", + "\n", + "import tensorflow.keras as K\n", + "\n", + "input_layer = K.Input(shape=(axnum,))\n", + "\n", + "output_layer = K.layers.Reshape((axnum2,axnum2))(K.layers.Dense(axnum2**2)(input_layer))\n", + "\n", + "ml_model = K.Model(input_layer, output_layer)\n", + "\n", + "ml_model.compile(optimizer=K.optimizers.Adam(), loss='mse')\n", + "\n", + "# train and validate on the sampling-noise-corrupted measurements generated above;\n", + "# (fitting the noiseless training_input/testing_input here would leave the noisy data unused)\n", + "ml_model.fit(training_input_noisy, training_target, epochs=10, validation_data = (testing_input_noisy, testing_target))" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "1/1 [==============================] - 0s 224ms/step - loss: 0.0588 - val_loss: 0.0516\n", + "Epoch 2/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0584 - val_loss: 0.0511\n", + "Epoch 3/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0580 - val_loss: 0.0507\n", + "Epoch 4/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0576 - val_loss: 0.0503\n", + "Epoch 5/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0571 - val_loss: 0.0498\n", + "Epoch 6/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0567 - val_loss: 0.0494\n", + "Epoch 7/10\n",
+ "1/1 [==============================] - 0s 12ms/step - loss: 0.0563 - val_loss: 0.0490\n", + "Epoch 8/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0559 - val_loss: 0.0486\n", + "Epoch 9/10\n", + "1/1 [==============================] - 0s 12ms/step - loss: 0.0555 - val_loss: 0.0482\n", + "Epoch 10/10\n", + "1/1 [==============================] - 0s 11ms/step - loss: 0.0551 - val_loss: 0.0478\n" + ], + "name": "stdout" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 40 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "HfCNpbs7352f" + }, + "source": [ + "" + ], + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file