Commit e5084010 authored by tgupta6

test resnet inference

parent d47d305b
resnet/cat.jpg

137 KiB

resnet/cat2.jpg

4.47 KiB
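
resnet/config.py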

# This is a variable-scope-aware configuration object for TensorFlow: a value
# set inside a tf.variable_scope is visible in that scope and its sub-scopes,
# and is discarded once the scope is exited.
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
class Config:
def __init__(self):
root = self.Scope('')
for k, v in FLAGS.__dict__['__flags'].iteritems():
root[k] = v
self.stack = [ root ]
def _pop_stale(self):
var_scope_name = tf.get_variable_scope().name
top = self.stack[0]
while not top.contains(var_scope_name):
# We aren't in this scope anymore
self.stack.pop(0)
top = self.stack[0]
def __getitem__(self, name):
self._pop_stale()
# Recursively extract value
for i in range(len(self.stack)):
cs = self.stack[i]
if name in cs:
return cs[name]
raise KeyError(name)
def __setitem__(self, name, value):
self._pop_stale()
top = self.stack[0]
var_scope_name = tf.get_variable_scope().name
assert top.contains(var_scope_name)
if top.name != var_scope_name:
top = self.Scope(var_scope_name)
self.stack.insert(0, top)
top[name] = value
class Scope(dict):
def __init__(self, name):
self.name = name
def contains(self, var_scope_name):
return var_scope_name.startswith(self.name)
# Test
if __name__ == '__main__':
def assert_raises(exception, fn):
try:
fn()
except exception:
pass
else:
assert False, "Expected exception"
c = Config()
c['hello'] = 1
assert c['hello'] == 1
with tf.variable_scope('foo'):
c['bar'] = 2
assert c['bar'] == 2
assert c['hello'] == 1
with tf.variable_scope('meow'):
c['dog'] = 3
assert c['dog'] == 3
assert c['bar'] == 2
assert c['hello'] == 1
assert_raises(KeyError, lambda: c['dog'])
assert c['bar'] == 2
assert c['hello'] == 1
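
resnet/convert.py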
import os
os.environ["GLOG_minloglevel"] = "2"
import sys
import re
#import caffe
import numpy as np
import tensorflow as tf
#import skimage.io
#from caffe.proto import caffe_pb2
#from synset import *
import inference as resnet
class CaffeParamProvider():
def __init__(self, caffe_net):
self.caffe_net = caffe_net
def conv_kernel(self, name):
k = self.caffe_net.params[name][0].data
        # caffe:      [out_channels, in_channels, filter_height, filter_width]
        #                    0            1              2              3
        # tensorflow: [filter_height, filter_width, in_channels, out_channels]
        #                    2            3              1              0
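        # e.g. ResNet conv1: caffe shape (64, 3, 7, 7) becomes tensorflow
        # shape (7, 7, 3, 64)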
        return k.transpose((2, 3, 1, 0))
def bn_gamma(self, name):
return self.caffe_net.params[name][0].data
def bn_beta(self, name):
return self.caffe_net.params[name][1].data
def bn_mean(self, name):
return self.caffe_net.params[name][0].data
def bn_variance(self, name):
return self.caffe_net.params[name][1].data
def fc_weights(self, name):
w = self.caffe_net.params[name][0].data
w = w.transpose((1, 0))
return w
def fc_biases(self, name):
b = self.caffe_net.params[name][1].data
return b
def preprocess(img):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
mean_bgr = load_mean_bgr()
print 'mean blue', np.mean(mean_bgr[:, :, 0])
print 'mean green', np.mean(mean_bgr[:, :, 1])
print 'mean red', np.mean(mean_bgr[:, :, 2])
out = np.copy(img) * 255.0
out = out[:, :, [2, 1, 0]] # swap channel from RGB to BGR
out -= mean_bgr
return out
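# e.g. preprocess() maps a pure-red RGB pixel (1.0, 0.0, 0.0) to BGR
# (0.0, 0.0, 255.0) before subtracting the per-pixel mean image.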
def assert_almost_equal(caffe_tensor, tf_tensor):
t = tf_tensor[0]
c = caffe_tensor[0].transpose((1, 2, 0))
#for i in range(0, t.shape[-1]):
# print "tf", i, t[:,i]
# print "caffe", i, c[:,i]
if t.shape != c.shape:
print "t.shape", t.shape
print "c.shape", c.shape
sys.exit(1)
d = np.linalg.norm(t - c)
print "d", d
assert d < 500
# returns image of shape [224, 224, 3]
# [height, width, depth]
def load_image(path, size=224):
img = skimage.io.imread(path)
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
resized_img = skimage.transform.resize(crop_img, (size, size))
return resized_img
def load_mean_bgr():
""" bgr mean pixel value image, [0, 255]. [height, width, 3] """
with open("data/ResNet_mean.binaryproto", mode='rb') as f:
data = f.read()
blob = caffe_pb2.BlobProto()
blob.ParseFromString(data)
mean_bgr = caffe.io.blobproto_to_array(blob)[0]
assert mean_bgr.shape == (3, 224, 224)
return mean_bgr.transpose((1, 2, 0))
def load_caffe(img_p, layers=50):
caffe.set_mode_cpu()
prototxt = "data/ResNet-%d-deploy.prototxt" % layers
caffemodel = "data/ResNet-%d-model.caffemodel" % layers
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
net.blobs['data'].data[0] = img_p.transpose((2, 0, 1))
assert net.blobs['data'].data[0].shape == (3, 224, 224)
net.forward()
caffe_prob = net.blobs['prob'].data[0]
print_prob(caffe_prob)
return net
# returns the top1 string
def print_prob(prob):
#print prob
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print "Top1: ", top1
# Get top5 label
top5 = [synset[pred[i]] for i in range(5)]
print "Top5: ", top5
return top1
def parse_tf_varnames(p, tf_varname, num_layers):
if tf_varname == 'scale1/weights':
return p.conv_kernel('conv1')
elif tf_varname == 'scale1/gamma':
return p.bn_gamma('scale_conv1')
elif tf_varname == 'scale1/beta':
return p.bn_beta('scale_conv1')
elif tf_varname == 'scale1/moving_mean':
return p.bn_mean('bn_conv1')
elif tf_varname == 'scale1/moving_variance':
return p.bn_variance('bn_conv1')
elif tf_varname == 'fc/weights':
return p.fc_weights('fc1000')
elif tf_varname == 'fc/biases':
return p.fc_biases('fc1000')
# scale2/block1/shortcut/weights
# scale3/block2/c/moving_mean
# scale3/block6/c/moving_variance
# scale4/block3/c/moving_mean
# scale4/block8/a/beta
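    # e.g. with num_layers=101, 'scale3/block2/c/moving_mean' maps to the
    # caffe blob 'bn3b1_branch2c' via the rules below.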
    re1 = r'scale(\d+)/block(\d+)/(shortcut|a|b|c|A|B)'
m = re.search(re1, tf_varname)
def letter(i):
return chr(ord('a') + i - 1)
scale_num = int(m.group(1))
block_num = int(m.group(2))
if scale_num == 2:
# scale 2 always uses block letters
block_str = letter(block_num)
elif scale_num == 3 or scale_num == 4:
        # scales 3 and 4 use block letters for l=50 and numbered blocks
        # for l=101 and l=152
if num_layers == 50:
block_str = letter(block_num)
else:
if block_num == 1:
block_str = 'a'
else:
block_str = 'b%d' % (block_num - 1)
elif scale_num == 5:
# scale 5 always block letters
block_str = letter(block_num)
else:
raise ValueError("unexpected scale_num %d" % scale_num)
branch = m.group(3)
if branch == "shortcut":
branch_num = 1
conv_letter = ''
else:
branch_num = 2
conv_letter = branch.lower()
x = (scale_num, block_str, branch_num, conv_letter)
#print x
if 'weights' in tf_varname:
return p.conv_kernel('res%d%s_branch%d%s' % x)
if 'gamma' in tf_varname:
return p.bn_gamma('scale%d%s_branch%d%s' % x)
if 'beta' in tf_varname:
return p.bn_beta('scale%d%s_branch%d%s' % x)
if 'moving_mean' in tf_varname:
return p.bn_mean('bn%d%s_branch%d%s' % x)
if 'moving_variance' in tf_varname:
return p.bn_variance('bn%d%s_branch%d%s' % x)
raise ValueError('unhandled var ' + tf_varname)
def checkpoint_fn(layers):
return 'ResNet-L%d.ckpt' % layers
def meta_fn(layers):
return 'ResNet-L%d.meta' % layers
def convert(graph, img, img_p, layers):
caffe_model = load_caffe(img_p, layers)
#for i, n in enumerate(caffe_model.params):
# print n
param_provider = CaffeParamProvider(caffe_model)
if layers == 50:
num_blocks = [3, 4, 6, 3]
elif layers == 101:
num_blocks = [3, 4, 23, 3]
elif layers == 152:
num_blocks = [3, 8, 36, 3]
with tf.device('/cpu:0'):
images = tf.placeholder("float32", [None, 224, 224, 3], name="images")
        logits = resnet.inference(images,
                                  is_training=False,
                                  num_blocks=num_blocks,
                                  bottleneck=True)
prob = tf.nn.softmax(logits, name='prob')
# We write the metagraph first to avoid adding a bunch of
# assign ops that are used to set variables from caffe.
# The checkpoint is written to at the end.
tf.train.export_meta_graph(filename=meta_fn(layers))
vars_to_restore = tf.all_variables()
saver = tf.train.Saver(vars_to_restore)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
assigns = []
for var in vars_to_restore:
#print var.op.name
data = parse_tf_varnames(param_provider, var.op.name, layers)
#print "caffe data shape", data.shape
#print "tf shape", var.get_shape()
assigns.append(var.assign(data))
sess.run(assigns)
#for op in tf.get_default_graph().get_operations():
# print op.name
i = [
graph.get_tensor_by_name("scale1/Relu:0"),
graph.get_tensor_by_name("scale2/MaxPool:0"),
graph.get_tensor_by_name("scale2/block1/Relu:0"),
graph.get_tensor_by_name("scale2/block2/Relu:0"),
graph.get_tensor_by_name("scale2/block3/Relu:0"),
graph.get_tensor_by_name("scale3/block1/Relu:0"),
graph.get_tensor_by_name("scale5/block3/Relu:0"),
graph.get_tensor_by_name("avg_pool:0"),
graph.get_tensor_by_name("prob:0"),
]
    o = sess.run(i, {images: img_p[np.newaxis, :]})
assert_almost_equal(caffe_model.blobs['conv1'].data, o[0])
assert_almost_equal(caffe_model.blobs['pool1'].data, o[1])
assert_almost_equal(caffe_model.blobs['res2a'].data, o[2])
assert_almost_equal(caffe_model.blobs['res2b'].data, o[3])
assert_almost_equal(caffe_model.blobs['res2c'].data, o[4])
assert_almost_equal(caffe_model.blobs['res3a'].data, o[5])
assert_almost_equal(caffe_model.blobs['res5c'].data, o[6])
#assert_almost_equal(np.squeeze(caffe_model.blobs['pool5'].data), o[7])
print_prob(o[8][0])
prob_dist = np.linalg.norm(caffe_model.blobs['prob'].data - o[8])
print 'prob_dist ', prob_dist
assert prob_dist < 0.2 # XXX can this be tightened?
# We've already written the metagraph to avoid a bunch of assign ops.
saver.save(sess, checkpoint_fn(layers), write_meta_graph=False)
def save_graph(save_path):
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
print "graph_def byte size", graph_def.ByteSize()
graph_def_s = graph_def.SerializeToString()
with open(save_path, "wb") as f:
f.write(graph_def_s)
print "saved model to %s" % save_path
def main(_):
img = load_image("data/cat.jpg")
print img
img_p = preprocess(img)
for layers in [50, 101, 152]:
g = tf.Graph()
with g.as_default():
print "CONVERT", layers
convert(g, img, img_p, layers)
if __name__ == '__main__':
tf.app.run()
resnet/dalmatian.jpg

86.3 KiB
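
resnet/inference.py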

# import skimage.io # bug. need to import this before tensorflow
# import skimage.transform # bug. need to import this before tensorflow
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
from config import Config
# import datetime
import numpy as np
import os
import time
MOVING_AVERAGE_DECAY = 0.9997
BN_DECAY = MOVING_AVERAGE_DECAY
BN_EPSILON = 0.001
CONV_WEIGHT_DECAY = 0.00004
CONV_WEIGHT_STDDEV = 0.1
FC_WEIGHT_DECAY = 0.00004
FC_WEIGHT_STDDEV = 0.01
RESNET_VARIABLES = 'resnet_variables'
UPDATE_OPS_COLLECTION = 'resnet_update_ops' # must be grouped with training op
IMAGENET_MEAN_BGR = [103.062623801, 115.902882574, 123.151630838, ]
tf.app.flags.DEFINE_integer('input_size', 224, "input image size")
activation = tf.nn.relu
def inference(x, is_training,
num_classes=1000,
num_blocks=[3, 4, 6, 3], # defaults to 50-layer network
use_bias=False, # defaults to using batch norm
bottleneck=True):
c = Config()
c['bottleneck'] = bottleneck
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['ksize'] = 3
c['stride'] = 1
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['stack_stride'] = 2
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 64
c['ksize'] = 7
c['stride'] = 2
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('scale2'):
x = _max_pool(x, ksize=3, stride=2)
c['num_blocks'] = num_blocks[0]
c['stack_stride'] = 1
c['block_filters_internal'] = 64
x = stack(x, c)
with tf.variable_scope('scale3'):
c['num_blocks'] = num_blocks[1]
c['block_filters_internal'] = 128
assert c['stack_stride'] == 2
x = stack(x, c)
with tf.variable_scope('scale4'):
c['num_blocks'] = num_blocks[2]
c['block_filters_internal'] = 256
x = stack(x, c)
with tf.variable_scope('scale5'):
c['num_blocks'] = num_blocks[3]
c['block_filters_internal'] = 512
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
    if num_classes is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
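# Example use of inference() (illustrative only; each call builds a fresh set
# of variables, so in practice each would go in its own tf.Graph):
#   images = tf.placeholder('float32', [None, 224, 224, 3])
#   logits = inference(images, is_training=False)             # ResNet-50
#   logits = inference(images, is_training=False,
#                      num_blocks=[3, 4, 23, 3])              # ResNet-101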
# This is what they use for CIFAR-10 and 100.
# See Section 4.2 in http://arxiv.org/abs/1512.03385
def inference_small(x,
is_training,
num_blocks=3, # 6n+2 total weight layers will be used.
use_bias=False, # defaults to using batch norm
num_classes=10):
c = Config()
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['num_classes'] = num_classes
    return inference_small_config(x, c)
def inference_small_config(x, c):
c['bottleneck'] = False
c['ksize'] = 3
c['stride'] = 1
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 16
c['block_filters_internal'] = 16
c['stack_stride'] = 1
x = conv(x, c)
x = bn(x, c)
x = activation(x)
x = stack(x, c)
with tf.variable_scope('scale2'):
c['block_filters_internal'] = 32
c['stack_stride'] = 2
x = stack(x, c)
with tf.variable_scope('scale3'):
c['block_filters_internal'] = 64
c['stack_stride'] = 2
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
    if c['num_classes'] is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
def _imagenet_preprocess(rgb):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
red, green, blue = tf.split(3, 3, rgb * 255.0)
bgr = tf.concat(3, [blue, green, red])
bgr -= IMAGENET_MEAN_BGR
return bgr
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n([cross_entropy_mean] + regularization_losses)
tf.scalar_summary('loss', loss_)
return loss_
def stack(x, c):
for n in range(c['num_blocks']):
s = c['stack_stride'] if n == 0 else 1
c['block_stride'] = s
with tf.variable_scope('block%d' % (n + 1)):
x = block(x, c)
return x
def block(x, c):
filters_in = x.get_shape()[-1]
    # Note: block_filters_internal is the number of filters the 3x3 convs
    # output internally. When bottleneck=False the block outputs that many
    # filters; when bottleneck=True it outputs filters_internal * 4 filters.
m = 4 if c['bottleneck'] else 1
filters_out = m * c['block_filters_internal']
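    # e.g. block_filters_internal=64: a bottleneck block outputs 256 filters,
    # a plain block outputs 64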
shortcut = x # branch 1
c['conv_filters_out'] = c['block_filters_internal']
if c['bottleneck']:
with tf.variable_scope('a'):
c['ksize'] = 1
c['stride'] = c['block_stride']
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('b'):
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('c'):
c['conv_filters_out'] = filters_out
c['ksize'] = 1
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
else:
with tf.variable_scope('A'):
c['stride'] = c['block_stride']
assert c['ksize'] == 3
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('B'):
c['conv_filters_out'] = filters_out
assert c['ksize'] == 3
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
with tf.variable_scope('shortcut'):
if filters_out != filters_in or c['block_stride'] != 1:
c['ksize'] = 1
c['stride'] = c['block_stride']
c['conv_filters_out'] = filters_out
shortcut = conv(shortcut, c)
shortcut = bn(shortcut, c)
return activation(x + shortcut)
def bn(x, c):
x_shape = x.get_shape()
params_shape = x_shape[-1:]
if c['use_bias']:
bias = _get_variable('bias', params_shape,
initializer=tf.zeros_initializer)
return x + bias
axis = list(range(len(x_shape) - 1))
beta = _get_variable('beta',
params_shape,
initializer=tf.zeros_initializer)
gamma = _get_variable('gamma',
params_shape,
initializer=tf.ones_initializer)
moving_mean = _get_variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer,
trainable=False)
moving_variance = _get_variable('moving_variance',
params_shape,
initializer=tf.ones_initializer,
trainable=False)
    # These ops will only be performed when training.
mean, variance = tf.nn.moments(x, axis)
update_moving_mean = moving_averages.assign_moving_average(moving_mean,
mean, BN_DECAY)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, BN_DECAY)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
mean, variance = control_flow_ops.cond(
c['is_training'], lambda: (mean, variance),
lambda: (moving_mean, moving_variance))
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
#x.set_shape(inputs.get_shape()) ??
return x
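# The moving-average updates registered above are only collected, not run, so
# a training script must execute them alongside the optimizer step. A minimal
# sketch (assuming `opt` is an optimizer and `loss_` is the loss tensor):
#   batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
#   apply_op = opt.minimize(loss_)
#   train_op = tf.group(apply_op, *batchnorm_updates)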
def fc(x, c):
num_units_in = x.get_shape()[1]
num_units_out = c['fc_units_out']
weights_initializer = tf.truncated_normal_initializer(
stddev=FC_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=[num_units_in, num_units_out],
initializer=weights_initializer,
                            weight_decay=FC_WEIGHT_DECAY)
biases = _get_variable('biases',
shape=[num_units_out],
initializer=tf.zeros_initializer)
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def _get_variable(name,
shape,
initializer,
weight_decay=0.0,
dtype='float',
trainable=True):
"A little wrapper around tf.get_variable to do weight decay and add to"
"resnet collection"
if weight_decay > 0:
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
regularizer = None
collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES]
return tf.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtype,
regularizer=regularizer,
collections=collections,
trainable=trainable)
def conv(x, c):
ksize = c['ksize']
stride = c['stride']
filters_out = c['conv_filters_out']
filters_in = x.get_shape()[-1]
shape = [ksize, ksize, filters_in, filters_out]
initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=shape,
dtype='float',
initializer=initializer,
weight_decay=CONV_WEIGHT_DECAY)
return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
def _max_pool(x, ksize=3, stride=2):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding='SAME')
resnet/schooner.jpg

26.8 KiB

resnet/synset.py

This diff is collapsed.
import os
import json
import image_io
import numpy as np
from resnet.synset import *
import resnet.inference as resnet_inference
import tftools.var_collect as var_collect
import tensorflow as tf
def print_prob(prob):
#print prob
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print "Top1: ", top1
# Get top5 label
top5 = [synset[pred[i]] for i in range(5)]
print "Top5: ", top5
return top1
if __name__ == '__main__':
model_dir = '/home/tanmay/Downloads/pretrained_networks/' + \
'Resnet/tensorflow-resnet-pretrained-20160509'
ckpt_filename = os.path.join(model_dir, 'ResNet-L50.ckpt')
img = image_io.imread("/home/tanmay/Code/GenVQA/GenVQA/resnet/dalmatian.jpg")
img = image_io.imresize(img, output_size=(224,224))
img = img.astype(np.float32)
sess = tf.Session()
images = tf.placeholder(tf.float32, shape=[1, 224, 224, 3], name='images')
logits = resnet_inference.inference(images, False)
prob_tensor = tf.nn.softmax(logits, name='prob')
vars_to_restore = []
for s in xrange(5):
vars_to_restore += var_collect.collect_scope('scale'+str(s+1))
vars_to_restore += var_collect.collect_scope('fc')
saver = tf.train.Saver(vars_to_restore)
saver.restore(sess, ckpt_filename)
all_vars = var_collect.collect_all()
vars_to_init = [var for var in all_vars if var not in vars_to_restore]
init = tf.initialize_variables(vars_to_init)
sess.run(init)
print "graph restored"
batch = img.reshape((1, 224, 224, 3))
feed_dict = {images: batch}
prob = sess.run(prob_tensor, feed_dict=feed_dict)
print_prob(prob[0])
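
tftools/var_collect.py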
import tensorflow as tf
def print_var_list(var_list, name='Variables'):
print name + ': \n' + '[' + ', '.join([var.name for var in var_list]) + ']'
def collect_name(var_name, graph=None):
    if graph is None:
graph = tf.get_default_graph()
var_list = graph.get_collection(tf.GraphKeys.VARIABLES, scope=var_name)
assert_str = "No variable exists with name '{}'".format(var_name)
assert len(var_list) != 0, assert_str
assert_str = \
"Multiple variables exist with name_scope '{}'".format(var_name)
assert len(var_list) == 1, assert_str
return var_list[0]
def collect_scope(name_scope, graph=None):
    if graph is None:
graph = tf.get_default_graph()
var_list = graph.get_collection(tf.GraphKeys.VARIABLES, scope=name_scope)
assert_str = "No variable exists with name_scope '{}'".format(name_scope)
assert len(var_list) != 0, assert_str
return var_list
def collect_all(graph=None):
    if graph is None:
graph = tf.get_default_graph()
var_list = graph.get_collection(tf.GraphKeys.VARIABLES)
return var_list
def collect_list(var_name_list, graph=None):
var_dict = dict()
for var_name in var_name_list:
var_dict[var_name] = collect_name(var_name, graph=graph)
return var_dict
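# Example usage (variable names are illustrative):
#   fc_weights = collect_name('fc/weights')
#   scale1_vars = collect_scope('scale1')
#   var_dict = collect_list(['fc/weights', 'fc/biases'])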