This tutorial gives developers some insight into how to use the TensorFlow Serving API and how to deploy and run their own model on SCP. Even though a TensorFlow model is built up from scratch, you won’t learn anything about neural networks or why certain hyperparameter values were chosen.
import numpy as np
import time
import pickle

class CifarImageProvider(object):

    def __init__(self, source_files, batch_size):
        self._source = source_files
        self._i = 0
        self._batch_size = batch_size
        self.images = None
        self.labels = None
        self._start_time = 0.0

    def _unpickle(self, file):
        with open(file, 'rb') as fo:
            dictionary = pickle.load(fo, encoding="bytes")
        return dictionary

    def _one_hot(self, vec, vals=10):
        n = len(vec)
        out = np.zeros((n, vals))
        out[range(n), vec] = 1
        return out

    def load(self):
        data = [self._unpickle(f) for f in self._source]
        images = np.vstack([d[b"data"] for d in data])
        n = len(images)
        # Reshape the flat rows to NHWC and scale pixel values to [0, 1]
        self.images = images.reshape(n, 3, 32, 32).transpose(0, 2, 3, 1).astype(float) / 255
        self.labels = self._one_hot(np.hstack([d[b"labels"] for d in data]), 10)
        return self

    def next_batch(self):
        x, y = self.images[self._i:self._i + self._batch_size], self.labels[self._i:self._i + self._batch_size]
        self._i = (self._i + self._batch_size) % len(self.images)
        return x, y

    def new_epoch(self):
        self._start_time = time.time()
        self._i = 0

    def end_epoch(self):
        return time.time() - self._start_time
class CifarImageManager(object):

    BATCH_SIZE = 100

    def __init__(self, batch_size=BATCH_SIZE):
        self.train = CifarImageProvider(["data_batch_{}".format(i) for i in range(1, 6)],
                                        batch_size=batch_size).load()
        self.test = CifarImageProvider(["test_batch"], batch_size=batch_size).load()

    def print_statistics(self):
        print("Number of images train: {}".format(len(self.train.images)))
        print("Number of labels train: {}".format(len(self.train.labels)))
        print("Number of images test: {}".format(len(self.test.images)))
        print("Number of labels test: {}".format(len(self.test.labels)))

    def train_len(self):
        return len(self.train.images)

    def test_len(self):
        return len(self.test.images)
imageManager = CifarImageManager()
imageManager.print_statistics()
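For the standard CIFAR-10 download, the statistics show 50,000 training and 10,000 test images with matching label counts. As a quick sanity check (a sketch, assuming the CIFAR-10 batch files sit in the working directory), one call to next_batch should yield the default 100 images in NHWC format together with their one-hot labels:

images, labels = imageManager.train.next_batch()
print(images.shape)  # (100, 32, 32, 3) with the default batch size
print(labels.shape)  # (100, 10), one-hot encoded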
import tensorflow as tf

def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def pool_2x2(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def conv_layer_bn(input_layer, shape, training, activation=tf.nn.relu6):
    '''
    Convolutional layer with batch normalization and average pooling
    Args:
        input_layer: input tensor
        shape: target shape of the filter. It has four dimensions:
            filter_height, filter_width, in_channels, out_channels
        training: indicates whether the model is used for training or
            inference, needed for batch normalization
        activation: activation function to be used
    '''
    with tf.name_scope("CNN-layer"):
        W = weight_variable(shape)
        b = bias_variable([shape[3]])
        cnn = tf.nn.conv2d(input_layer, W, strides=[1, 1, 1, 1], padding='SAME') + b
        bn = tf.layers.batch_normalization(cnn, training=training, momentum=0.9)
        return pool_2x2(activation(bn))
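To make the shape bookkeeping concrete, here is a small sketch (the placeholder names are illustrative; the numbers match the CIFAR-10 setup used later). The SAME-padded 3x3 convolution keeps the spatial size, and the 2x2 average pooling then halves it:

# Hypothetical usage: a batch of CIFAR images, 32x32 pixels, 3 channels
images = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
is_training = tf.placeholder_with_default(False, shape=())
# 3x3 filter, 3 input channels, 64 output channels
out = conv_layer_bn(images, shape=[3, 3, 3, 64], training=is_training)
print(out.get_shape())  # (?, 16, 16, 64) -- pooling halved 32x32 to 16x16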
def full_layer(input_layer, size):
    '''
    Fully connected layer. Its main purpose here is to convert the output
    of the CNN layers, the so-called feature vector, into the classes.
    Args:
        input_layer: input tensor
        size: number of output neurons
    '''
    in_size = int(input_layer.get_shape()[1])
    W = weight_variable([in_size, size])
    b = bias_variable([size])
    return tf.matmul(input_layer, W) + b

def lrelu6(x, alpha=0.2, name="LeakyReLU"):
    '''
    Leaky Rectified Linear Unit with a fixed saturation of 6. This activation
    function is not part of TensorFlow as of now.
    Args:
        x: tensor
        alpha: slope of the negative part
    '''
    return tf.maximum(alpha * x, tf.minimum(6.0, x), name=name)

def prelu6(x, name="ParameterizedReLU"):
    '''
    Parametric Rectified Linear Unit with a fixed saturation of 6.
    This activation function is not part of TensorFlow as of now.
    prelu learns the slope of the negative part.
    Args:
        x: tensor
    '''
    with tf.variable_scope(name_or_scope=None, default_name="prelu"):
        alpha = tf.get_variable("prelu", shape=x.get_shape()[-1],
                                dtype=x.dtype, initializer=tf.constant_initializer(0.1))
        return lrelu6(x, alpha, name=name)
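A quick numeric check of lrelu6 (a sketch, with values picked purely for illustration): inputs below zero are scaled by alpha, inputs in between pass through unchanged, and anything above 6 saturates.

with tf.Session() as s:
    print(s.run(lrelu6(tf.constant([-2.0, 3.0, 9.0]))))
    # [-0.4  3.  6.] -- alpha * x below zero, identity in the middle, capped at 6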
#---------------------------------------------------------------------
# Prepare Saving
#---------------------------------------------------------------------
from pathlib import Path
from tensorflow.saved_model import utils
from tensorflow.saved_model import signature_def_utils
from tensorflow.saved_model import signature_constants
import datetime

def build_path(prefix):
    path = Path.joinpath(Path.home(), prefix, datetime.datetime.utcnow().strftime('%Y-%m-%dT%H%M%S'))
    print("Model saved to {}".format(path))
    return str(path)

def predict_signature(input_layer, prediction):
    tensor_info_x = utils.build_tensor_info(input_layer)
    tensor_info_y = utils.build_tensor_info(prediction)
    return signature_def_utils.build_signature_def(
        inputs={'images': tensor_info_x},
        outputs={'scores': tensor_info_y},
        method_name=signature_constants.PREDICT_METHOD_NAME)
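For context, once the model is deployed, a client can query the 'predict_images' signature defined above through the TensorFlow Serving REST API. A minimal sketch, where host, port and model name are placeholders rather than part of this tutorial’s setup:

import requests

payload = {'signature_name': 'predict_images',
           'instances': [[[[0.0] * 3] * 32] * 32]}  # one dummy 32x32x3 image
r = requests.post('http://localhost:8501/v1/models/cifar:predict', json=payload)
print(r.json())  # {'predictions': [[...ten scores...]]}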
#---------------------------------------------------------------------
# Build TensorFlow Model
#---------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import tf_serving as tfs
import model_elements as cifar
from tensorflow.saved_model import builder as saved_builder
from tensorflow.saved_model import tag_constants
from tensorflow.saved_model import signature_constants
from data_manager import CifarImageManager
from pprint import pprint

HYPER_PARAMETER = {
    'TF_VERSION'    : tf.__version__,
    'BATCH_SIZE'    : 50,
    'NO_EPOCHS'     : 50,
    'LEARNING_RATE' : 0.00075,
    'KEEP_PROB'     : 0.5,
    'C1'            : 64,
    'C2'            : 128,
    'C3'            : 256
}
EXPORT_DIR = 'tutorial'

pprint(HYPER_PARAMETER)
def test(sess, epoch=0, end='\n'):
    run_time = cifarManager.train.end_epoch()
    # Evaluate the 10,000 test images in 10 chunks of 1,000 to keep the
    # memory footprint small, then average the per-chunk accuracies.
    X = cifarManager.test.images.reshape(10, 1000, 32, 32, 3)
    Y = cifarManager.test.labels.reshape(10, 1000, 10)
    acc = np.mean([sess.run(accuracy, feed_dict={x: X[i], y_: Y[i], keep_prob: 1.0}) for i in range(10)])
    print("{} : {:.4}% : {}".format(epoch + 1, acc * 100, round(run_time, 0)), end=end)

def indicator(n):
    # Simple textual progress indicator; n is the progress in percent.
    s = '[ ] '.replace(' ', '.', n) + str(n) + '%'
    print('\r', s, end='')
cifarManager = CifarImageManager(HYPER_PARAMETER['BATCH_SIZE'])
x = tf.placeholder(tf.float32, shape=[None,32,32,3], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 10], name='y_')
training = tf.placeholder_with_default(False, shape=(), name='training')
keep_prob = tf.placeholder_with_default(1.0, shape=(), name='keep_prob')
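# Note that 'training' and 'keep_prob' default to their inference values,
# so a client of the served model never has to feed them explicitly.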
'''
Three convolutional layers
'''
conv1 = cifar.conv_layer_bn(x, shape=[3,3,3,HYPER_PARAMETER['C1']], training=training, activation=cifar.prelu6)
conv2 = cifar.conv_layer_bn(conv1, shape=[3,3,HYPER_PARAMETER['C1'],HYPER_PARAMETER['C2']], training=training, activation=cifar.prelu6)
conv3 = cifar.conv_layer_bn(conv2, shape=[3,3,HYPER_PARAMETER['C2'],HYPER_PARAMETER['C3']], training=training, activation=cifar.prelu6)
'''
Convert the output of the CNN layers into a feature vector, plus dropout
to prevent overfitting. Each of the three 2x2 poolings halves the spatial
resolution, so the 32x32 input has shrunk to 4x4 with C3 channels.
'''
conv3_flat = tf.reshape(conv3, [-1, 4*4*HYPER_PARAMETER['C3']])
conv3_drop = tf.nn.dropout(conv3_flat, keep_prob=keep_prob)
'''
Convert the feature vector into a one-hot encoding representing the 10 classes
'''
full_1 = cifar.prelu6(cifar.full_layer(conv3_drop, 512))
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)
prediction = cifar.full_layer(full1_drop, 10)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y_))
train_step = tf.train.AdamOptimizer(HYPER_PARAMETER['LEARNING_RATE']).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
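# Batch normalization keeps its moving mean/variance updates in the
# UPDATE_OPS collection; they have to be run together with train_step,
# which happens in the training loop below.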
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
builder = saved_builder.SavedModelBuilder(tfs.build_path(EXPORT_DIR))
with tf.Session() as sess:
    cifar.sess = sess
    sess.run(tf.global_variables_initializer())
    steps = cifarManager.train_len() // HYPER_PARAMETER['BATCH_SIZE']
    for j in range(HYPER_PARAMETER['NO_EPOCHS']):
        cifarManager.train.new_epoch()
        for i in range(steps):
            batch = cifarManager.train.next_batch()
            sess.run([train_step, extra_update_ops],
                     feed_dict={training: True, x: batch[0], y_: batch[1],
                                keep_prob: HYPER_PARAMETER['KEEP_PROB']})
            indicator(i * 100 // steps)
        print(" ", end='')
        test(sess, j, end='')
        print()
    builder.add_meta_graph_and_variables(
        sess,
        [tag_constants.SERVING],
        signature_def_map={
            'predict_images': tfs.predict_signature(x, prediction),
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tfs.predict_signature(x, prediction)})
builder.save()
With this, we achieve roughly 85% accuracy on the test data, at around 22 seconds per epoch. If you halved C1, C2 and C3, you will likely see somewhat lower accuracy at a fraction of the runtime per epoch.
One last thing remains: go to the folder the TensorFlow model was saved to. There you should find two things: a saved_model.pb file that contains the model and a variables folder.
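If you want to verify the export before moving on, here is a minimal sketch (assuming export_path holds the timestamped directory printed by build_path) that reloads the SavedModel and runs one dummy prediction:

import numpy as np
import tensorflow as tf
from tensorflow.saved_model import loader, tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    meta = loader.load(sess, [tag_constants.SERVING], export_path)
    sig = meta.signature_def['predict_images']
    scores = sess.run(sig.outputs['scores'].name,
                      feed_dict={sig.inputs['images'].name: np.zeros((1, 32, 32, 3))})
    print(scores.shape)  # (1, 10), one raw score per class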
That’s it for this tutorial. In the second part, as mentioned, we will upload our model to MLF, deploy it, and run inference on some examples.