I. / 1. Linear classification with a single neuron

(0) Importing libraries

In [1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.python.framework import ops
ops.reset_default_graph()  # Clears the default graph stack and resets the global default graph


#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.04
#tf.reset_default_graph()

(1) Creating training set

1-a Setting parameters

In [2]:
# Number of samples
num_samples = 2 * 300
# Variance of normal noise to perturbate samples
variance = 0.15

1-b Creating training samples and labels

In [3]:
#********************************
#            SAMPLES
#********************************
# Coordinates of class1
class_1_x1 = np.linspace(0.0, 2.0, num_samples//2)  #(300,)
class_1_x2 = np.linspace(1.5, 2.5, num_samples//2)  #(300,)
# Coordinates of class2
class_2_x1 = np.linspace(0.0, 2.0, num_samples//2)  #(300,)
class_2_x2 = np.linspace(0.0, 1.0, num_samples//2)  #(300,)

# Making two vectors of samples: class1 and class2
class_1 = np.column_stack((class_1_x1, class_1_x2))  #(300, 2)
class_2 = np.column_stack((class_2_x1, class_2_x2))  #(300, 2)

# Making one vector with all of the samples
inputs = np.row_stack((class_1, class_2))          #(600, 2)

# Perturbating samples with normal noise
inputs = inputs + np.random.normal(0.0, variance, [num_samples, 2])


#********************************
#            LABELS
#********************************
# Labels of the classes
target_1 = np.repeat(1.0, num_samples//2)           #(300,)
target_2 = np.repeat(0.0, num_samples//2)           #(300,)

# Concatenating labels to one vector
targets = np.concatenate((target_1, target_2))      #(600,)

# Extend dimension
targets = np.expand_dims(targets, 1)                #(600, 1)

1-c Plotting samples

In [4]:
# Plotting training samples according to their labels/classes
for i in range(num_samples):
    if(targets[i] == 1.0):
        plt.plot(inputs[i,0], inputs[i,1], 'ro')
    else:
        plt.plot(inputs[i,0], inputs[i,1], 'bo')
# Plotting figure   
plt.show()

(2) Creating variables and placeholders

In [5]:
ops.reset_default_graph()  # Clearing the default graph stack and reseting the global default graph

2-a Placeholders

In [6]:
# Creating placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

2-b Variables

In [7]:
# Creating model variables (parameters)
W = tf.Variable(tf.random_normal(shape=[2, 1]))  #súlymátrix
b = tf.Variable(tf.random_normal(shape=[1, 1]))  #eltolás

(3) Structure of the model

01_excercies_0.png

3-a Creating the structure (mapping) of the model

In [8]:
# The structure of the neuron
linear = tf.add(tf.matmul(x_data, W), b)
model_output = tf.nn.sigmoid(linear)

3-b Setting the predicted class

In [9]:
# Predicting class for input datas :       (0, 1) --> (-0.5, 0.5) --> {-1, 1} --> {0, 2} --> {0, 1} 
prediction = tf.multiply(tf.add(tf.sign(model_output - 0.5), 1.0), 0.5)

(4) Loss function & Optimizer

4-a Parameters

In [10]:
# Learning rate parameter
learning_rate = 0.05

4-b Defining loss function

In [11]:
# Selecting loss function
xentropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = y_target, logits = model_output))

4-c Defining optimizer

In [12]:
# Select optimizer
my_opt = tf.train.GradientDescentOptimizer(learning_rate)
# Calling train_step to execute one training loop
train_step = my_opt.minimize(xentropy)

(5) Creating computational graph & Initailizing variables

5-a Creating computational graph

In [13]:
# Creating graph
#sess = tf.Session(config = config)

sess = tf.Session()

5-b Initializing variables

In [14]:
# Initializing variables
init = tf.global_variables_initializer()
sess.run(init)

(6) Training the model

6-a Training parameters

In [15]:
# Number of epuchs
num_epochs = 5000

6-b Training

In [16]:
# The training loop
for i in range(num_epochs):
    
    # Calling one train_step
    sess.run(train_step, feed_dict={x_data: inputs, y_target: targets})
    
    # Loggint to screen number of epochs and current loss
    if (i+1)%1000==0:
        print('Step #' + str(i+1))
        print('Loss = ' + str(sess.run(xentropy, feed_dict={x_data: inputs, y_target: targets})))
        print()
Step #1000
Loss = 0.5855846

Step #2000
Loss = 0.5498409

Step #3000
Loss = 0.5353548

Step #4000
Loss = 0.52773833

Step #5000
Loss = 0.5230772

(8) Illustration of the trained classifier

In [17]:
# Create a mesh to plot points in
# Setting boundaries of the figure
x_min = inputs[:, 0].min() - 0.2;
x_max = inputs[:, 0].max() + 0.2;
y_min = inputs[:, 1].min() - 0.2;
y_max = inputs[:, 1].max() + 0.2;

# Creating mesh
# Setting step size
h = (x_max - x_min)/100.0
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
grid_points = np.c_[xx.ravel(), yy.ravel()]

# Classification of background grids
grid_predictions = sess.run(prediction, feed_dict={x_data: grid_points})
grid_predictions = grid_predictions.reshape(xx.shape)

# Better format to plotting
grid_predictions_drawing = ((grid_predictions * 2.0) - 1.0)
# Plotting points and grid
plt.contourf(xx, yy, grid_predictions_drawing, colors=('c', 'r'), alpha=0.3)

# Plotting samples
for i in range(num_samples):
    if(targets[i] == 1.0):
        plt.plot(inputs[i,0], inputs[i,1], 'ro')
    else:
        plt.plot(inputs[i,0], inputs[i,1], 'bo')

# Setting title of the figure
plt.title('Linear classification with a single neuron')

# Plotting figure
plt.show()

(9) Closing session

In [18]:
# Releasing resources of session by inivoke closing method
sess.close()