# Configuration constants for the VisualGenome object/attribute experiments.
# NOTE(review): removed stray "Newer"/"Older" lines (web-scrape artifacts);
# as bare names they would raise NameError on import.
import os
import pdb
def mkdir_if_not_exists(dir_name):
    """Create directory ``dir_name`` if it does not already exist.

    Args:
        dir_name: Path of the directory to create. The parent directory
            must already exist — ``os.mkdir`` does not create intermediate
            directories (callers below create parents first).
    """
    # Fix: the original body had lost its indentation (SyntaxError).
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
# Name of this experiment; used to namespace all of its outputs.
experiment_name = 'object_attribute_with_compensation_with_word_vector_prediction_high_lr'

# Root directory under which every sub-experiment saves its results.
global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VisualGenome'
global_experiment_dir = os.path.join(global_output_dir, experiment_name)

# Tensorboard event files for this experiment are written here.
tb_log_dir = os.path.join(global_experiment_dir, 'tensorboard_logdir')

# Create the directory hierarchy (parents before children).
for _needed_dir in [global_output_dir, global_experiment_dir, tb_log_dir]:
    mkdir_if_not_exists(_needed_dir)
# Height and width to which images are resized before feeding into networks.
image_size = (80, 80)

# Token to be used when an object or attribute label is unknown.
unknown_token = 'UNK'

# Data paths.
data_absolute_path = '/home/ssd/VisualGenome'
image_dir = os.path.join(data_absolute_path, 'cropped_regions')

# JSON / image files produced by the dataset restructuring step.
object_labels_json = os.path.join(data_absolute_path, 'restructured/object_labels.json')
attribute_labels_json = os.path.join(data_absolute_path, 'restructured/attribute_labels.json')
regions_json = os.path.join(data_absolute_path, 'restructured/region_with_labels.json')
mean_image_filename = os.path.join(data_absolute_path, 'restructured/mean_image.jpg')
vocab_json = os.path.join(data_absolute_path, 'restructured/vocab_subset.json')

# Number of distinct object / attribute classes predicted by the classifiers.
num_object_labels = 1000
num_attribute_labels = 1000
# Dataset split sizes.
# First 80% of the regions are used for training, the next 10% for
# validation, and the remainder for testing.
num_train_regions = 1561416  # First 80%
num_val_regions = 195176     # Next 10%
# BUG FIX: num_total_regions was used below without ever being defined,
# which raised NameError on import. It is reconstructed here from the
# training split, which the comments above state is 80% of the data.
# TODO(review): confirm this matches the actual region count on disk.
num_total_regions = round(num_train_regions / 0.8)
num_test_regions = num_total_regions \
                   - num_train_regions \
                   - num_val_regions
# Pretrained ResNet-50 checkpoint used to initialize the image network.
resnet_ckpt = '/home/tanmay/Downloads/pretrained_networks/' + \
              'Resnet/tensorflow-resnet-pretrained-20160509/' + \
              'ResNet-L50.ckpt'

# Pretrained word vectors (GoogleNews word2vec binary, 300-dimensional).
word2vec_binary = '/home/tanmay/Code/word2vec/word2vec-api-master/' + \
                  'GoogleNews-vectors-negative300.bin'
word_vector_size = 300

# Numpy matrix storing vocabulary word vectors.
# BUG FIX: the original join omitted data_absolute_path, yielding a
# relative path inconsistent with every other data file in this config.
pretrained_vocab_word_vectors_npy = os.path.join(
    data_absolute_path,
    'restructured/pretrained_vocab_word_vectors.npy')
# ---------------------------------------------------------------------
# Object / Attribute classifier training parameters.
# ---------------------------------------------------------------------
region_batch_size = 200
region_num_samples = num_train_regions  # train over the full training split
region_num_epochs = 10
region_offset = 0                       # index of the first region to read
region_queue_size = 400                 # input prefetch queue capacity
region_regularization_coeff = 1e-4
region_lr = 1e-4
region_log_every_n_iter = 500

# Classifier checkpoints and logs are written under this directory.
region_output_dir = os.path.join(global_experiment_dir, 'object_attribute_classifiers')
mkdir_if_not_exists(region_output_dir)

# Checkpoint path prefix; the saver appends '-<iteration>' to it.
region_model = os.path.join(region_output_dir, 'model')

# Resume fine-tuning from the checkpoint saved at this iteration.
region_fine_tune_from_iter = 20500
region_fine_tune_from = '{}-{}'.format(region_model, region_fine_tune_from_iter)
# ---------------------------------------------------------------------
# Object / Attribute classifier evaluation parameters.
# ---------------------------------------------------------------------
region_eval_on = 'val'  # One of {'val','test','train'}

# Checkpoint (by iteration number) to evaluate.
region_model_to_eval = '{}-{}'.format(region_model, '58000')

# Predicted attribute scores are dumped into this directory.
region_attribute_scores_dirname = os.path.join(region_output_dir, 'attribute_scores')
mkdir_if_not_exists(region_attribute_scores_dirname)
# ---------------------------------------------------------------------
# Answer prediction parameters.
# ---------------------------------------------------------------------
num_region_proposals = 100
num_mcq_candidates = 18                        # multiple-choice options per question
num_negative_answers = num_mcq_candidates - 1  # every candidate except the answer

# VQA data paths.
vqa_basedir = '/home/ssd/VQA/'
vqa_train_image_dir = os.path.join(vqa_basedir, 'train2014_cropped')
vqa_train_anno = os.path.join(
    vqa_basedir,
    'mscoco_train2014_annotations_with_parsed_questions.json')
vqa_answer_vocab_json = os.path.join(vqa_basedir, 'answer_vocab.json')