diff --git a/constants.py b/constants.py
index ffa5ec8a8ed87b03061f76897dfcd1e7c64a2b29..47443cc308e07a806c7bcf89c8e13139c765d6e8 100644
--- a/constants.py
+++ b/constants.py
@@ -1,195 +1,13 @@
-import os
-import pdb
-
-def mkdir_if_not_exists(dir_name):
-    if not os.path.exists(dir_name):
-        os.mkdir(dir_name)
-        
-experiment_name = 'QA_explicit_dot_joint_training_pretrained'
-#experiment_name = 'object_attribute_classifier_large_images'
-# Global output directory (all subexperiments will be saved here)
-global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
-
-global_experiment_dir = os.path.join(
-    global_output_dir,
-    experiment_name)
-
-tb_log_dir = os.path.join(
-    global_experiment_dir,
-    'tensorboard_logdir')
-
-mkdir_if_not_exists(global_output_dir)
-mkdir_if_not_exists(global_experiment_dir)
-mkdir_if_not_exists(tb_log_dir)
-
-#height and width to which images are resized before feeding into networks
-image_size = (224, 224) 
-
-# Token to be used if object or attribute variable is unknown
-unknown_token = 'UNK'
-
-# Genome Data paths
-data_absolute_path = '/home/ssd/VisualGenome'
-
-image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
-genome_resnet_feat_dir = os.path.join(
-    data_absolute_path,
-    'cropped_regions_large_resnet_features')
-
-object_labels_json = os.path.join(
-    data_absolute_path,
-    'restructured/object_labels.json')
-
-attribute_labels_json = os.path.join(
-    data_absolute_path,
-    'restructured/attribute_labels.json')
-
-regions_json = os.path.join(
-    data_absolute_path,
-    'restructured/region_with_labels.json')
-
-mean_image_filename = os.path.join(
-    data_absolute_path,
-    'restructured/mean_image.jpg')
-
-vocab_json = os.path.join(
-    data_absolute_path,
-    'restructured/vocab_subset.json')
-
-num_object_labels = 1000
-num_attribute_labels = 1000
-
-# Regions data partition
-# First 80% meant to be used for training
-# Next 10% is set aside for validation
-# Last 10% is to be used for testing
-num_total_regions = 1951768
-num_train_regions = 1561416 # First 80%
-num_val_regions = 195176 # Next 10%
-num_test_regions = num_total_regions \
-                   - num_train_regions \
-                   - num_val_regions 
-
-# Pretrained resnet ckpt
-resnet_ckpt = '/home/tanmay/Downloads/pretrained_networks/' + \
-              'Resnet/tensorflow-resnet-pretrained-20160509/' + \
-              'ResNet-L50.ckpt'
-
-# Pretrained word vectors
-word2vec_binary = '/home/tanmay/Code/word2vec/word2vec-api-master/' + \
-                  'GoogleNews-vectors-negative300.bin'
-
-word_vector_size = 300
-resnet_feat_dim = 2048
-
-# Numpy matrix storing vocabulary word vectors
-pretrained_vocab_word_vectors_npy = os.path.join(
-    data_absolute_path,
-    'restructured/pretrained_vocab_word_vectors.npy')
-
-# Object Attribute Classifier Training Params
-region_batch_size = 200
-region_num_samples = num_train_regions
-region_num_epochs = 10
-region_offset = 0
-region_queue_size = 400
-region_regularization_coeff = 1e-4
-region_lr = 1e-4
-region_log_every_n_iter = 500
-region_output_dir = os.path.join(
-    global_experiment_dir,
-    'object_attribute_classifiers')
-    
-mkdir_if_not_exists(region_output_dir)
-
-region_model = os.path.join(
-    region_output_dir,
-    'model')                    
-
-# Object Attribute Finetuning Params
-region_fine_tune_from_iter = 3000
-region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
-
-# Object Attribute Classifier Evaluation Params
-region_eval_on = 'train' # One of {'val','test','train'}
-region_model_to_eval = region_model + '-' + '77500'
-
-region_attribute_scores_dirname = os.path.join(
-    region_output_dir,
-    'attribute_scores')
-
-mkdir_if_not_exists(region_attribute_scores_dirname)
-
-# Answer prediction
-num_region_proposals = 100
-num_mcq_candidates = 18
-num_negative_answers = num_mcq_candidates - 1
-
-# VQA data paths
-vqa_basedir = '/home/ssd/VQA/'
-
-vqa_train_image_dir = os.path.join(
-    vqa_basedir,
-    'train2014_cropped_large')
-vqa_train_resnet_feat_dir = os.path.join(
-    vqa_basedir,
-    'train2014_cropped_large_resnet_features')
-vqa_train_anno = os.path.join(
-    vqa_basedir,
-    'mscoco_train2014_annotations_with_parsed_questions.json')
-
-vqa_val_image_dir = os.path.join(
-    vqa_basedir,
-    'val2014_cropped_large')
-vqa_val_anno = os.path.join(
-    vqa_basedir,
-    'mscoco_val2014_annotations_with_parsed_questions.json')
-
-vqa_answer_vocab_json = os.path.join(
-    vqa_basedir,
-    'answer_vocab.json')
-
-# VQA dataset params
-num_train_questions = 248349
-num_val_questions = 10000 #121512
-num_test_questions = 0
-
-# Answer classifier training params
-answer_batch_size = 50
-answer_num_epochs = 10
-answer_offset = 0
-answer_regularization_coeff = 1e-5
-answer_queue_size = 500
-answer_embedding_dim = 600
-answer_lr = 1e-4
-answer_log_every_n_iter = 500
-answer_output_dir = os.path.join(
-    global_experiment_dir,
-    'answer_classifiers')
-    
-mkdir_if_not_exists(answer_output_dir)
-
-pretrained_model = '/home/tanmay/Code/GenVQA/Exp_Results/VisualGenome/' + \
-    'object_attribute_classifier_large_images/' + \
-    'object_attribute_classifiers/model-77500'
-
-answer_model = os.path.join(
-    answer_output_dir,
-    'model')
-
-# Answer classifier additional joint training params
-num_regions_with_labels = 100
-
-# Answer fine tune params
-answer_fine_tune_from_iter = 17000
-answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
-
-# Answer eval params
-answer_eval_on = 'val'
-answer_model_to_eval = answer_model + '-13000'
-
-answer_eval_data_json = os.path.join(
-    answer_output_dir,
-    'eval_' + answer_eval_on + '_data.json')
-
-
+import socket
+hostname = socket.gethostname()
+
+if hostname == 'vision-gpu-1':
+    from constants_vision_gpu_1 import *
+elif hostname == 'vision-gpu-2':
+    from constants_vision_gpu_2 import *
+elif hostname == 'crunchy':
+    from constants_crunchy import *
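+else:
+    # Assumption: an unrecognized hostname is a configuration error, so fail fast
+    # here instead of silently importing nothing and hitting NameErrors later.
+    raise ImportError('No host-specific constants module for hostname: ' + hostname)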
diff --git a/constants_crunchy.py b/constants_crunchy.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffa5ec8a8ed87b03061f76897dfcd1e7c64a2b29
--- /dev/null
+++ b/constants_crunchy.py
@@ -0,0 +1,196 @@
+import os
+import pdb
+
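+# Note: os.mkdir creates only the leaf directory; the parent path must already exist.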
+def mkdir_if_not_exists(dir_name):
+    if not os.path.exists(dir_name):
+        os.mkdir(dir_name)
+        
+experiment_name = 'QA_explicit_dot_joint_training_pretrained'
+#experiment_name = 'object_attribute_classifier_large_images'
+# Global output directory (all subexperiments will be saved here)
+global_output_dir = '/home/tanmay/Code/GenVQA/Exp_Results/VQA'
+
+global_experiment_dir = os.path.join(
+    global_output_dir,
+    experiment_name)
+
+tb_log_dir = os.path.join(
+    global_experiment_dir,
+    'tensorboard_logdir')
+
+mkdir_if_not_exists(global_output_dir)
+mkdir_if_not_exists(global_experiment_dir)
+mkdir_if_not_exists(tb_log_dir)
+
+#height and width to which images are resized before feeding into networks
+image_size = (224, 224) 
+
+# Token to be used if object or attribute variable is unknown
+unknown_token = 'UNK'
+
+# Genome Data paths
+data_absolute_path = '/home/ssd/VisualGenome'
+
+image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
+genome_resnet_feat_dir = os.path.join(
+    data_absolute_path,
+    'cropped_regions_large_resnet_features')
+
+object_labels_json = os.path.join(
+    data_absolute_path,
+    'restructured/object_labels.json')
+
+attribute_labels_json = os.path.join(
+    data_absolute_path,
+    'restructured/attribute_labels.json')
+
+regions_json = os.path.join(
+    data_absolute_path,
+    'restructured/region_with_labels.json')
+
+mean_image_filename = os.path.join(
+    data_absolute_path,
+    'restructured/mean_image.jpg')
+
+vocab_json = os.path.join(
+    data_absolute_path,
+    'restructured/vocab_subset.json')
+
+num_object_labels = 1000
+num_attribute_labels = 1000
+
+# Regions data partition
+# First 80% meant to be used for training
+# Next 10% is set aside for validation
+# Last 10% is to be used for testing
+num_total_regions = 1951768
+num_train_regions = 1561416 # First 80%
+num_val_regions = 195176 # Next 10%
+num_test_regions = num_total_regions \
+                   - num_train_regions \
+                   - num_val_regions 
+
+# Pretrained resnet ckpt
+resnet_ckpt = '/home/tanmay/Downloads/pretrained_networks/' + \
+              'Resnet/tensorflow-resnet-pretrained-20160509/' + \
+              'ResNet-L50.ckpt'
+
+# Pretrained word vectors
+word2vec_binary = '/home/tanmay/Code/word2vec/word2vec-api-master/' + \
+                  'GoogleNews-vectors-negative300.bin'
+
+word_vector_size = 300
+resnet_feat_dim = 2048
+
+# Numpy matrix storing vocabulary word vectors
+pretrained_vocab_word_vectors_npy = os.path.join(
+    data_absolute_path,
+    'restructured/pretrained_vocab_word_vectors.npy')
+
+# Object Attribute Classifier Training Params
+region_batch_size = 200
+region_num_samples = num_train_regions
+region_num_epochs = 10
+region_offset = 0
+region_queue_size = 400
+region_regularization_coeff = 1e-4
+region_lr = 1e-4
+region_log_every_n_iter = 500
+region_output_dir = os.path.join(
+    global_experiment_dir,
+    'object_attribute_classifiers')
+    
+mkdir_if_not_exists(region_output_dir)
+
+region_model = os.path.join(
+    region_output_dir,
+    'model')                    
+
+# Object Attribute Finetuning Params
+region_fine_tune_from_iter = 3000
+region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
+
+# Object Attribute Classifier Evaluation Params
+region_eval_on = 'train' # One of {'val','test','train'}
+region_model_to_eval = region_model + '-' + '77500'
+
+region_attribute_scores_dirname = os.path.join(
+    region_output_dir,
+    'attribute_scores')
+
+mkdir_if_not_exists(region_attribute_scores_dirname)
+
+# Answer prediction
+num_region_proposals = 100
+num_mcq_candidates = 18
+num_negative_answers = num_mcq_candidates - 1
+
+# VQA data paths
+vqa_basedir = '/home/ssd/VQA/'
+
+vqa_train_image_dir = os.path.join(
+    vqa_basedir,
+    'train2014_cropped_large')
+vqa_train_resnet_feat_dir = os.path.join(
+    vqa_basedir,
+    'train2014_cropped_large_resnet_features')
+vqa_train_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_train2014_annotations_with_parsed_questions.json')
+
+vqa_val_image_dir = os.path.join(
+    vqa_basedir,
+    'val2014_cropped_large')
+vqa_val_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_val2014_annotations_with_parsed_questions.json')
+
+vqa_answer_vocab_json = os.path.join(
+    vqa_basedir,
+    'answer_vocab.json')
+
+# VQA dataset params
+num_train_questions = 248349
+num_val_questions = 10000 # evaluate on a 10000-question subset (full val set: 121512)
+num_test_questions = 0
+
+# Answer classifier training params
+answer_batch_size = 50
+answer_num_epochs = 10
+answer_offset = 0
+answer_regularization_coeff = 1e-5
+answer_queue_size = 500
+answer_embedding_dim = 600
+answer_lr = 1e-4
+answer_log_every_n_iter = 500
+answer_output_dir = os.path.join(
+    global_experiment_dir,
+    'answer_classifiers')
+    
+mkdir_if_not_exists(answer_output_dir)
+
+pretrained_model = '/home/tanmay/Code/GenVQA/Exp_Results/VisualGenome/' + \
+    'object_attribute_classifier_large_images/' + \
+    'object_attribute_classifiers/model-77500'
+
+answer_model = os.path.join(
+    answer_output_dir,
+    'model')
+
+# Answer classifier additional joint training params
+num_regions_with_labels = 100
+
+# Answer fine tune params
+answer_fine_tune_from_iter = 17000
+answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
+
+# Answer eval params
+answer_eval_on = 'val'
+answer_model_to_eval = answer_model + '-13000'
+
+answer_eval_data_json = os.path.join(
+    answer_output_dir,
+    'eval_' + answer_eval_on + '_data.json')
+
+
diff --git a/constants_vision_gpu_2.py b/constants_vision_gpu_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..556e91b4aee08c0622a8d19fef57518d11f264ce
--- /dev/null
+++ b/constants_vision_gpu_2.py
@@ -0,0 +1,195 @@
+import os
+import pdb
+
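+# Note: os.mkdir creates only the leaf directory; the parent path must already exist.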
+def mkdir_if_not_exists(dir_name):
+    if not os.path.exists(dir_name):
+        os.mkdir(dir_name)
+        
+experiment_name = 'QA_explicit_dot_pretrained'
+#experiment_name = 'object_attribute_classifier_large_images'
+# Global output directory (all subexperiments will be saved here)
+global_output_dir = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/VQA'
+
+global_experiment_dir = os.path.join(
+    global_output_dir,
+    experiment_name)
+
+tb_log_dir = os.path.join(
+    global_experiment_dir,
+    'tensorboard_logdir')
+
+mkdir_if_not_exists(global_output_dir)
+mkdir_if_not_exists(global_experiment_dir)
+mkdir_if_not_exists(tb_log_dir)
+
+#height and width to which images are resized before feeding into networks
+image_size = (224, 224) 
+
+# Token to be used if object or attribute variable is unknown
+unknown_token = 'UNK'
+
+# Genome Data paths
+data_absolute_path = '/home/nfs/tgupta6/data/VisualGenome'
+
+image_dir = os.path.join(data_absolute_path, 'cropped_regions_large')
+genome_resnet_feat_dir = os.path.join(
+    data_absolute_path,
+    'cropped_regions_large_resnet_features')
+
+object_labels_json = os.path.join(
+    data_absolute_path,
+    'restructured/object_labels.json')
+
+attribute_labels_json = os.path.join(
+    data_absolute_path,
+    'restructured/attribute_labels.json')
+
+regions_json = os.path.join(
+    data_absolute_path,
+    'restructured/region_with_labels.json')
+
+mean_image_filename = os.path.join(
+    data_absolute_path,
+    'restructured/mean_image.jpg')
+
+vocab_json = os.path.join(
+    data_absolute_path,
+    'restructured/vocab_subset.json')
+
+num_object_labels = 1000
+num_attribute_labels = 1000
+
+# Regions data partition
+# First 80% meant to be used for training
+# Next 10% is set aside for validation
+# Last 10% is to be used for testing
+num_total_regions = 1951768
+num_train_regions = 1561416 # First 80%
+num_val_regions = 195176 # Next 10%
+num_test_regions = num_total_regions \
+                   - num_train_regions \
+                   - num_val_regions 
+
+# Pretrained resnet ckpt
+resnet_ckpt = '/home/nfs/tgupta6/data/Resnet/' + \
+              'ResNet-L50.ckpt'
+
+# Pretrained word vectors
+word2vec_binary = '/home/nfs/tgupta6/data/word_vectors/' + \
+                  'GoogleNews-vectors-negative300.bin'
+
+word_vector_size = 300
+resnet_feat_dim = 2048
+
+# Numpy matrix storing vocabulary word vectors
+pretrained_vocab_word_vectors_npy = os.path.join(
+    data_absolute_path,
+    'restructured/pretrained_vocab_word_vectors.npy')
+
+# Object Attribute Classifier Training Params
+region_batch_size = 200
+region_num_samples = num_train_regions
+region_num_epochs = 10
+region_offset = 0
+region_queue_size = 400
+region_regularization_coeff = 1e-4
+region_lr = 1e-4
+region_log_every_n_iter = 500
+region_output_dir = os.path.join(
+    global_experiment_dir,
+    'object_attribute_classifiers')
+    
+mkdir_if_not_exists(region_output_dir)
+
+region_model = os.path.join(
+    region_output_dir,
+    'model')                    
+
+# Object Attribute Finetuning Params
+region_fine_tune_from_iter = 3000
+region_fine_tune_from = region_model + '-' + str(region_fine_tune_from_iter)
+
+# Object Attribute Classifier Evaluation Params
+region_eval_on = 'train' # One of {'val','test','train'}
+region_model_to_eval = region_model + '-' + '77500'
+
+region_attribute_scores_dirname = os.path.join(
+    region_output_dir,
+    'attribute_scores')
+
+mkdir_if_not_exists(region_attribute_scores_dirname)
+
+# Answer prediction
+num_region_proposals = 100
+num_mcq_candidates = 18
+num_negative_answers = num_mcq_candidates - 1
+
+# VQA data paths
+vqa_basedir = '/home/nfs/tgupta6/data/VQA/'
+
+vqa_train_image_dir = os.path.join(
+    vqa_basedir,
+    'train2014_cropped_large')
+vqa_train_resnet_feat_dir = os.path.join(
+    vqa_basedir,
+    'train2014_cropped_large_resnet_features')
+vqa_train_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_train2014_annotations_with_parsed_questions.json')
+
+vqa_val_image_dir = os.path.join(
+    vqa_basedir,
+    'val2014_cropped_large')
+vqa_val_anno = os.path.join(
+    vqa_basedir,
+    'mscoco_val2014_annotations_with_parsed_questions.json')
+
+vqa_answer_vocab_json = os.path.join(
+    vqa_basedir,
+    'answer_vocab.json')
+
+# VQA dataset params
+num_train_questions = 248349
+num_val_questions = 10000 # evaluate on a 10000-question subset (full val set: 121512)
+num_test_questions = 0
+
+# Answer classifier training params
+answer_batch_size = 50
+answer_num_epochs = 10
+answer_offset = 0
+answer_regularization_coeff = 1e-5
+answer_queue_size = 500
+answer_embedding_dim = 600
+answer_lr = 1e-4
+answer_log_every_n_iter = 500
+answer_output_dir = os.path.join(
+    global_experiment_dir,
+    'answer_classifiers')
+    
+mkdir_if_not_exists(answer_output_dir)
+
+pretrained_model = '/home/nfs/tgupta6/projects/GenVQA/Exp_Results/' +\
+                   'pretrained_object_attribute_classifier/' +\
+                   'obj_atr_model_77500'
+
+answer_model = os.path.join(
+    answer_output_dir,
+    'model')
+
+# Answer classifier additional joint training params
+num_regions_with_labels = 100
+
+# Answer fine tune params
+answer_fine_tune_from_iter = 17000
+answer_fine_tune_from = answer_model + '-' + str(answer_fine_tune_from_iter)
+
+# Answer eval params
+answer_eval_on = 'val'
+answer_model_to_eval = answer_model + '-13000'
+
+answer_eval_data_json = os.path.join(
+    answer_output_dir,
+    'eval_' + answer_eval_on + '_data.json')
+
+